From 8af605cf3d6ee978fe6c457aed357e8d65f19218 Mon Sep 17 00:00:00 2001 From: Reed Allman Date: Mon, 26 Mar 2018 15:43:49 -0700 Subject: [PATCH] update thrift, opencensus, others (#893) * update thrift, opencensus, others * stats: update to opencensus 0.6.0 view api --- Gopkg.lock | 54 +- Gopkg.toml | 6 +- api/agent/agent.go | 2 +- api/agent/drivers/docker/docker_client.go | 8 +- api/agent/state_trackers.go | 4 +- api/agent/stats.go | 16 +- api/logs/s3/s3.go | 4 +- vendor/git.apache.org/thrift.git/.gitignore | 26 +- vendor/git.apache.org/thrift.git/.travis.yml | 47 +- vendor/git.apache.org/thrift.git/CHANGES | 290 - vendor/git.apache.org/thrift.git/LANGUAGES.md | 91 +- vendor/git.apache.org/thrift.git/Makefile.am | 6 +- vendor/git.apache.org/thrift.git/README.md | 2 +- .../git.apache.org/thrift.git/Thrift.podspec | 4 +- vendor/git.apache.org/thrift.git/appveyor.yml | 57 +- .../build/appveyor/CYGW-appveyor-build.bat | 36 + .../build/appveyor/CYGW-appveyor-install.bat | 34 + .../build/appveyor/CYGW-appveyor-test.bat | 21 + .../build/appveyor/MING-appveyor-build.bat | 11 +- .../build/appveyor/MING-appveyor-install.bat | 42 +- .../build/appveyor/MING-appveyor-test.bat | 10 +- .../build/appveyor/MSVC-appveyor-install.bat | 46 +- .../build/appveyor/MSVC-appveyor-test.bat | 13 +- .../build/appveyor/MSYS-appveyor-build.bat | 3 +- .../build/appveyor/MSYS-appveyor-install.bat | 21 +- .../build/appveyor/build-libevent.bat | 17 +- .../thrift.git/build/appveyor/build-zlib.bat | 15 +- .../build/appveyor/cl_setcompiler.bat | 10 +- .../thrift.git/build/appveyor/cl_setenv.bat | 125 +- .../build/appveyor/cl_setgenerator.bat | 11 +- .../thrift.git/build/appveyor/cl_showenv.bat | 15 +- .../build/appveyor/simulate-appveyor.bat | 2 +- .../build/cmake/ConfigureChecks.cmake | 4 +- .../build/cmake/DefineCMakeDefaults.cmake | 2 +- .../build/cmake/DefineOptions.cmake | 6 +- .../build/cmake/DefinePlatformSpecifc.cmake | 1 + .../thrift.git/build/cmake/FindGradlew.cmake | 36 + 
.../thrift.git/build/cmake/config.h.in | 10 +- .../thrift.git/build/docker/README.md | 98 +- .../thrift.git/build/docker/scripts/sca.sh | 6 +- .../build/docker/ubuntu-artful/Dockerfile | 138 +- .../build/docker/ubuntu-trusty/Dockerfile | 73 +- .../docker/ubuntu-trusty/Dockerfile.orig | 231 - .../build/docker/ubuntu-xenial/Dockerfile | 101 +- .../thrift.git/compiler/cpp/Makefile.am | 8 +- .../thrift.git/compiler/cpp/README.md | 158 +- .../thrift.git/compiler/cpp/compiler.vcxproj | 9 +- .../compiler/cpp/src/thrift/audit/t_audit.cpp | 4 +- .../src/thrift/generate/t_as3_generator.cc | 8 +- .../src/thrift/generate/t_c_glib_generator.cc | 8 +- .../cpp/src/thrift/generate/t_cl_generator.cc | 558 + .../src/thrift/generate/t_cocoa_generator.cc | 16 +- .../src/thrift/generate/t_cpp_generator.cc | 16 +- .../src/thrift/generate/t_csharp_generator.cc | 10 +- .../cpp/src/thrift/generate/t_d_generator.cc | 8 +- .../src/thrift/generate/t_dart_generator.cc | 8 +- .../src/thrift/generate/t_delphi_generator.cc | 191 +- .../src/thrift/generate/t_erl_generator.cc | 46 +- .../cpp/src/thrift/generate/t_generator.cc | 13 +- .../cpp/src/thrift/generate/t_generator.h | 29 +- .../cpp/src/thrift/generate/t_go_generator.cc | 105 +- .../cpp/src/thrift/generate/t_gv_generator.cc | 4 +- .../src/thrift/generate/t_haxe_generator.cc | 8 +- .../cpp/src/thrift/generate/t_hs_generator.cc | 8 +- .../src/thrift/generate/t_html_generator.cc | 8 +- .../src/thrift/generate/t_java_generator.cc | 18 +- .../src/thrift/generate/t_javame_generator.cc | 8 +- .../cpp/src/thrift/generate/t_js_generator.cc | 120 +- .../src/thrift/generate/t_json_generator.cc | 4 +- .../src/thrift/generate/t_lua_generator.cc | 8 +- .../thrift/generate/t_netcore_generator.cc | 444 +- .../src/thrift/generate/t_netcore_generator.h | 137 + .../src/thrift/generate/t_ocaml_generator.cc | 8 +- .../src/thrift/generate/t_perl_generator.cc | 8 +- .../src/thrift/generate/t_php_generator.cc | 430 +- .../cpp/src/thrift/generate/t_py_generator.cc 
| 14 +- .../cpp/src/thrift/generate/t_rb_generator.cc | 8 +- .../cpp/src/thrift/generate/t_rs_generator.cc | 4 +- .../cpp/src/thrift/generate/t_st_generator.cc | 8 +- .../src/thrift/generate/t_swift_generator.cc | 16 +- .../src/thrift/generate/t_xml_generator.cc | 4 +- .../thrift-t_php_generator.o-a60a38e9 | 0 .../compiler/cpp/src/thrift/main.cc | 8 +- .../cpp/src/thrift/parse/t_const_value.h | 62 +- .../compiler/cpp/src/thrift/parse/t_scope.h | 12 +- .../compiler/cpp/src/thrift/plugin/plugin.cc | 23 +- .../cpp/src/thrift/plugin/plugin.thrift | 9 +- .../cpp/src/thrift/plugin/plugin_output.cc | 74 +- .../cpp/src/thrift/plugin/type_util.h | 1 + .../compiler/cpp/src/thrift/thrifty.yy | 1 + .../thrift.git/compiler/cpp/test/Makefile.am | 13 +- .../thrift.git/compiler/cpp/test/bincat.sh | 3 + .../cpp/test/plugin/conversion_test.cc | 6 +- .../cpp/test/plugin_stability_test.sh | 32 + .../compiler/cpp/tests/CMakeLists.txt | 153 + .../thrift.git/compiler/cpp/tests/README.md | 88 + .../compiler/cpp/tests/catch/catch.hpp | 11508 ++++++++++++++ .../t_netcore_generator_functional_tests.cc | 339 + ...core_generator_functional_tests_helpers.cc | 237 + ...tcore_generator_functional_tests_helpers.h | 34 + .../t_netcore_generator_helpers_tests.cc | 209 + ..._netcore_generator_initialization_tests.cc | 74 + .../compiler/cpp/tests/tests_main.cc | 19 + .../git.apache.org/thrift.git/composer.json | 13 +- vendor/git.apache.org/thrift.git/configure.ac | 39 +- .../contrib/Rebus/Properties/AssemblyInfo.cs | 4 +- .../contrib/fb303/cpp/FacebookBase.h | 1 + .../contrib/fb303/java/build.properties | 5 + .../thrift.git/contrib/fb303/java/build.xml | 30 +- .../thrift.git/contrib/fb303/py/setup.py | 2 +- .../contrib/thrift-maven-plugin/pom.xml | 2 +- .../contrib/vagrant/centos-6.5/README.md | 2 +- .../contrib/zeromq/csharp/ThriftZMQ.csproj | 20 +- .../thrift.git/debian/changelog | 3 +- .../git.apache.org/thrift.git/debian/control | 6 +- vendor/git.apache.org/thrift.git/debian/rules | 4 +- 
vendor/git.apache.org/thrift.git/doap.rdf | 5 - .../thrift.git/doc/install/debian.md | 30 +- .../git.apache.org/thrift.git/lib/Makefile.am | 4 + .../thrift.git/lib/as3/build.properties | 5 + .../thrift.git/lib/as3/build.xml | 38 +- .../thrift.git/lib/cl/Makefile.am | 40 + .../thrift.git/lib/cl/README.md | 253 + .../lib/cl/READMES/readme-cassandra.lisp | 64 + .../thrift.git/lib/cl/ensure-externals.sh | 16 + .../thrift.git/lib/cl/load-locally.lisp | 23 + .../lib/cl/test/make-test-binary.lisp | 31 + .../thrift.git/lib/cocoa/src/Thrift.h | 2 +- .../thrift.git/lib/cocoa/src/protocol/TBase.h | 4 +- .../thrift.git/lib/cpp/Makefile.am | 2 +- .../thrift.git/lib/cpp/src/thrift/TOutput.cpp | 2 +- .../src/thrift/async/TEvhttpClientChannel.cpp | 7 +- .../src/thrift/async/TEvhttpClientChannel.h | 4 +- .../src/thrift/server/TNonblockingServer.cpp | 25 +- .../cpp/src/thrift/transport/PlatformSocket.h | 4 + .../thrift/transport/TBufferTransports.cpp | 4 +- .../src/thrift/transport/THttpTransport.cpp | 6 +- .../cpp/src/thrift/transport/TSSLSocket.cpp | 19 + .../lib/cpp/src/thrift/transport/TSSLSocket.h | 1 + .../lib/cpp/src/thrift/transport/TSocket.cpp | 35 +- .../lib/cpp/src/thrift/transport/TSocket.h | 15 +- .../lib/cpp/src/thrift/windows/config.h | 1 - .../thrift.git/lib/cpp/test/CMakeLists.txt | 14 +- .../thrift.git/lib/cpp/test/Makefile.am | 31 +- .../lib/cpp/test/OneWayHTTPTest.cpp | 242 + .../test/OneWayTest.thrift} | 52 +- .../cpp/test/RenderedDoubleConstantsTest.cpp | 122 + .../lib/cpp/test/TNonblockingServerTest.cpp | 2 +- .../lib/cpp/test/concurrency/Tests.cpp | 2 +- .../cpp/test/concurrency/TimerManagerTests.h | 34 +- .../ThriftMSBuildTask.csproj | 2 +- .../lib/csharp/src/Properties/AssemblyInfo.cs | 4 +- .../lib/csharp/src/Protocol/TJSONProtocol.cs | 5 - .../csharp/src/Server/TThreadPoolServer.cs | 116 +- .../lib/csharp/src/Server/TThreadedServer.cs | 83 +- .../lib/csharp/src/TApplicationException.cs | 5 + .../thrift.git/lib/csharp/src/Thrift.csproj | 2 +- 
.../test/JSON/Properties/AssemblyInfo.cs | 4 +- .../Multiplex/Client/MultiplexClient.csproj | 2 +- .../Client/Properties/AssemblyInfo.cs | 4 +- .../Multiplex/Server/MultiplexServer.csproj | 2 +- .../Server/Properties/AssemblyInfo.cs | 4 +- .../ThriftMVCTest/Properties/AssemblyInfo.cs | 4 +- .../thrift.git/lib/d/src/thrift/base.d | 2 +- .../lib/d/src/thrift/server/simple.d | 4 +- .../lib/d/src/thrift/server/taskpool.d | 4 +- .../lib/d/src/thrift/server/threaded.d | 4 +- .../lib/d/test/thrift_test_server.d | 58 +- .../lib/src/transport/t_framed_transport.dart | 120 +- .../lib/src/transport/t_socket_transport.dart | 4 +- .../thrift.git/lib/dart/pubspec.yaml | 4 +- .../transport/t_framed_transport_test.dart | 175 + .../delphi/src/Thrift.Processor.Multiplex.pas | 55 +- .../lib/delphi/src/Thrift.Stream.pas | 23 +- .../lib/delphi/src/Thrift.Transport.Pipes.pas | 46 +- .../lib/delphi/src/Thrift.Transport.pas | 63 +- .../lib/delphi/src/Thrift.Utils.pas | 18 +- .../thrift.git/lib/delphi/src/Thrift.pas | 2 +- .../ConsoleHelper.pas} | 9 +- .../thrift.git/lib/delphi/test/TestClient.pas | 131 +- .../thrift.git/lib/delphi/test/TestServer.pas | 2 +- .../lib/delphi/test/TestServerEvents.pas | 2 +- .../thrift.git/lib/delphi/test/client.dpr | 1 - .../multiplexed/Multiplex.Server.Main.pas | 2 +- .../multiplexed/Multiplex.Test.Client.dpr | 1 - .../multiplexed/Multiplex.Test.Server.dpr | 2 +- .../delphi/test/serializer/TestSerializer.dpr | 1 - .../thrift.git/lib/delphi/test/server.dpr | 1 - .../delphi/test/skip/skiptest_version1.dpr | 1 - .../delphi/test/skip/skiptest_version2.dpr | 1 - .../test/typeregistry/TestTypeRegistry.dpr | 1 - .../thrift.git/lib/erl/Makefile.am | 1 + .../test/test_rendered_double_constants.erl | 68 + .../thrift.git/lib/go/Makefile.am | 2 - .../thrift.git/lib/go/README.md | 2 + .../thrift.git/lib/go/test/Makefile.am | 5 - .../go/test/dontexportrwtest/compile_test.go | 9 +- .../lib/go/test/tests/client_error_test.go | 23 +- .../go17.go => 
lib/go/test/tests/context.go} | 8 +- .../test/tests/multiplexed_protocol_test.go | 13 + .../lib/go/test/tests/one_way_test.go | 8 + .../lib/go/test/tests/protocol_mock.go | 5 +- .../lib/go/test/tests/required_fields_test.go | 2 + .../lib/go/test/tests/thrifttest_handler.go | 5 +- .../go/test/tests/thrifttest_handler_go17.go | 212 - .../go/thrift/application_exception_test.go | 4 +- .../lib/go/thrift/binary_protocol.go | 5 +- .../lib/go/thrift/buffered_transport.go | 5 +- .../thrift.git/lib/go/thrift/client.go | 17 +- .../thrift.git/lib/go/thrift/client_go17.go | 13 - .../lib/go/thrift/client_pre_go17.go | 13 - .../{common_test_go17.go => common_test.go} | 2 - .../lib/go/thrift/compact_protocol.go | 7 +- .../lib/go/thrift/{go17.go => context.go} | 2 - .../lib/go/thrift/debug_protocol.go | 5 +- .../lib/go/thrift/framed_transport.go | 5 +- .../thrift.git/lib/go/thrift/http_client.go | 6 +- .../lib/go/thrift/http_transport.go | 12 + .../lib/go/thrift/http_transport_go17.go | 38 - .../lib/go/thrift/http_transport_pre_go17.go | 40 - .../lib/go/thrift/iostream_transport.go | 3 +- .../thrift.git/lib/go/thrift/json_protocol.go | 5 +- .../lib/go/thrift/json_protocol_test.go | 41 +- .../thrift.git/lib/go/thrift/memory_buffer.go | 3 +- .../lib/go/thrift/multiplexed_protocol.go | 31 + .../go/thrift/multiplexed_protocol_go17.go | 53 - .../thrift/multiplexed_protocol_pre_go17.go | 54 - .../thrift.git/lib/go/thrift/pre_go17.go | 26 - .../lib/go/thrift/processor_factory.go | 12 + .../thrift.git/lib/go/thrift/protocol.go | 3 +- .../thrift.git/lib/go/thrift/protocol_test.go | 51 +- .../thrift.git/lib/go/thrift/serializer.go | 16 +- .../lib/go/thrift/serializer_test.go | 5 +- .../lib/go/thrift/serializer_types_test.go | 2 +- .../lib/go/thrift/simple_json_protocol.go | 5 +- .../go/thrift/simple_json_protocol_test.go | 49 +- .../thrift.git/lib/go/thrift/socket.go | 3 +- .../thrift.git/lib/go/thrift/ssl_socket.go | 3 +- .../thrift.git/lib/go/thrift/transport.go | 9 +- 
.../lib/go/thrift/transport_test.go | 7 +- .../lib/go/thrift/zlib_transport.go | 5 +- .../thrift.git/lib/haxe/README.md | 6 +- .../thrift.git/lib/haxe/haxelib.json | 2 +- .../thrift.git/lib/hs/thrift.cabal | 2 +- .../thrift.git/lib/java/CMakeLists.txt | 71 +- .../thrift.git/lib/java/Makefile.am | 46 +- .../thrift.git/lib/java/README.md | 128 +- .../thrift.git/lib/java/android/build.gradle | 38 +- .../thrift.git/lib/java/build.gradle | 64 + .../thrift.git/lib/java/build.xml | 421 - .../code_quality_tools/findbugs-filter.xml | 51 + .../{build.properties => gradle.properties} | 26 +- .../gradle/additionalArtifacts.gradle} | 24 +- .../gradle/cloverCoverage.gradle} | 47 +- .../lib/java/gradle/codeQualityChecks.gradle | 40 + .../lib/java/gradle/environment.gradle | 73 + .../lib/java/gradle/functionalTests.gradle | 155 + .../lib/java/gradle/generateTestThrift.gradle | 109 + .../lib/java/gradle/publishing.gradle | 119 + .../java/gradle/sourceConfiguration.gradle | 84 + .../lib/java/gradle/unitTests.gradle | 82 + .../gradle/wrapper/gradle-wrapper.properties | 5 + .../thrift.git/lib/java/gradlew | 172 + .../thrift.git/lib/java/gradlew.bat | 84 + .../go17.go => lib/java/settings.gradle} | 8 +- .../apache/thrift/AsyncProcessFunction.java | 1 - .../org/apache/thrift/TAsyncProcessor.java | 9 +- .../src/org/apache/thrift/TEnumHelper.java | 1 - .../thrift/server/TExtensibleServlet.java | 6 +- .../thrift/server/TThreadPoolServer.java | 35 +- .../transport/TSSLTransportFactory.java | 68 +- .../thrift/transport/TSaslTransport.java | 4 +- .../java/test/org/apache/thrift/Fixtures.java | 2 +- .../thrift/TestRenderedDoubleConstants.java | 179 + .../test/org/apache/thrift/TestReuse.java | 3 - .../thrift/protocol/TestTProtocolUtil.java | 14 - .../protocol/TestTSimpleJSONProtocol.java | 2 +- .../apache/thrift/server/ServerTestBase.java | 7 +- .../transport/TestTSSLTransportFactory.java | 10 +- ...TestTSSLTransportFactoryStreamedStore.java | 62 + .../transport/TestTSimpleFileTransport.java | 
18 +- .../thrift.git/lib/js/Gruntfile.js | 88 +- .../thrift.git/lib/js/package-lock.json | 2285 +++ .../thrift.git/lib/js/package.json | 10 +- .../thrift.git/lib/js/src/thrift.js | 29 +- .../thrift.git/lib/js/test/build.properties | 5 + .../thrift.git/lib/js/test/build.xml | 39 +- .../lib/js/test/deep-constructor.test.js | 22 +- .../thrift.git/lib/js/test/jsTestDriver.conf | 1 + .../thrift.git/lib/js/test/test-async.js | 8 + .../lib/js/test/test-deep-constructor.html | 6 +- .../lib/js/test/test-double-rendering.html | 55 + .../lib/js/test/test-double-rendering.js | 143 + .../thrift.git/lib/js/test/test-es6.html | 65 + .../thrift.git/lib/js/test/test-es6.js | 354 + .../thrift.git/lib/lua/THttpTransport.lua | 2 +- .../thrift.git/lib/netcore/Makefile.am | 90 +- .../thrift.git/lib/netcore/README.md | 3 + .../Tests/Thrift.IntegrationTests/.gitignore | 2 + .../Protocols/ProtocolsOperationsTests.cs | 502 + .../Thrift.IntegrationTests.csproj | 29 + .../.gitignore | 4 + ...rift.PublicInterfaces.Compile.Tests.csproj | 15 + .../Collections/TCollectionsTests.cs | 83 + .../Thrift.Tests/Collections/THashSetTests.cs | 71 + .../Protocols/TJsonProtocolHelperTests.cs | 172 + .../Protocols/TJsonProtocolTests.cs | 67 + .../Tests/Thrift.Tests/Thrift.Tests.csproj | 18 + .../thrift.git/lib/netcore/Thrift.sln | 60 +- .../netcore/Thrift/Properties/AssemblyInfo.cs | 4 +- .../netcore/Thrift/Protocols/TAbstractBase.cs | 2 +- .../lib/netcore/Thrift/Protocols/TBase.cs | 2 +- .../Thrift/Protocols/TBinaryProtocol.cs | 19 +- .../Thrift/Protocols/TCompactProtocol.cs | 10 +- .../netcore/Thrift/Protocols/TJSONProtocol.cs | 393 +- .../Thrift/Protocols/TMultiplexedProtocol.cs | 13 +- .../lib/netcore/Thrift/Protocols/TProtocol.cs | 17 +- .../Thrift/Protocols/TProtocolDecorator.cs | 17 +- .../Thrift/Protocols/TProtocolException.cs | 5 +- .../Protocols/Utilities/TBase64Helper.cs | 101 + .../Utilities/TJsonProtocolConstants.cs | 61 + .../Utilities/TJsonProtocolHelper.cs | 176 + 
.../Protocols/Utilities/TProtocolUtil.cs | 54 +- .../netcore/Thrift/Server/AsyncBaseServer.cs | 23 +- .../lib/netcore/Thrift/Server/TBaseServer.cs | 19 +- .../netcore/Thrift/TApplicationException.cs | 43 +- .../lib/netcore/Thrift/TBaseClient.cs | 19 +- .../Client/TBufferedClientTransport.cs | 14 +- .../Client/TFramedClientTransport.cs | 14 +- .../Transports/Client/THttpClientTransport.cs | 6 +- .../Client/TSocketClientTransport.cs | 7 +- .../Client/TTlsSocketClientTransport.cs | 7 +- .../Transports/Server/THttpServerTransport.cs | 21 +- .../Server/TNamedPipeServerTransport.cs | 2 +- .../Server/TServerSocketTransport.cs | 18 +- .../Server/TTlsServerSocketTransport.cs | 27 +- .../Thrift/Transports/TClientTransport.cs | 3 +- .../Thrift/Transports/TServerTransport.cs | 2 +- .../thrift.git/lib/netcore/build.cmd | 27 + .../thrift.git/lib/netcore/build.sh | 32 + .../thrift.git/lib/netcore/runtests.cmd | 28 + .../thrift.git/lib/netcore/runtests.sh | 26 + .../thrift.git/lib/nodejs/README.md | 2 +- .../lib/nodejs/lib/thrift/binary_protocol.js | 5 +- .../lib/nodejs/lib/thrift/connection.js | 14 +- .../lib/nodejs/lib/thrift/http_connection.js | 23 +- .../thrift.git/lib/nodejs/lib/thrift/index.js | 2 + .../lib/nodejs/lib/thrift/json_protocol.js | 62 +- .../lib/nodejs/test/browser_client.js | 45 - .../thrift.git/lib/nodejs/test/client.js | 18 +- .../thrift.git/lib/nodejs/test/exceptions.js | 14 + .../thrift.git/lib/nodejs/test/server.js | 18 +- .../thrift.git/lib/nodejs/test/testAll.sh | 14 - .../thrift.git/lib/ocaml/_oasis | 2 +- .../thrift.git/lib/perl/MANIFEST.SKIP | 13 + .../thrift.git/lib/perl/Makefile.PL | 5 + .../thrift.git/lib/perl/build-cpan-dist.sh | 48 +- .../thrift.git/lib/perl/lib/Thrift.pm | 2 +- .../thrift.git/lib/perl/lib/Thrift/Server.pm | 38 +- .../lib/perl/lib/Thrift/ServerSocket.pm | 15 +- .../thrift.git/lib/perl/tools/FixupDist.pl | 35 + .../thrift.git/lib/php/Makefile.am | 106 +- .../thrift.git/lib/php/README.md | 33 +- 
.../thrift.git/lib/php/coding_standards.md | 6 +- .../thrift.git/lib/php/lib/Base/TBase.php | 382 + .../ClassLoader/ThriftClassLoader.php | 46 +- .../lib/Exception/TApplicationException.php | 76 + .../lib/php/lib/Exception/TException.php | 384 + .../Exception/TProtocolException.php | 22 +- .../Exception/TTransportException.php | 18 +- .../Factory/TBinaryProtocolFactory.php | 22 +- .../Factory/TCompactProtocolFactory.php | 14 +- .../Factory/TJSONProtocolFactory.php | 0 .../{Thrift => }/Factory/TProtocolFactory.php | 12 +- .../Factory/TStringFuncFactory.php | 18 +- .../lib/php/lib/Factory/TTransportFactory.php | 18 + .../Protocol/JSON/BaseContext.php | 0 .../Protocol/JSON/ListContext.php | 0 .../Protocol/JSON/LookaheadReader.php | 0 .../Protocol/JSON/PairContext.php | 0 .../SimpleJSON/CollectionMapKeyException.php | 0 .../Protocol/SimpleJSON/Context.php | 1 - .../Protocol/SimpleJSON/ListContext.php | 0 .../Protocol/SimpleJSON/MapContext.php | 4 - .../Protocol/SimpleJSON/StructContext.php | 5 +- .../lib/php/lib/Protocol/TBinaryProtocol.php | 453 + .../Protocol/TBinaryProtocolAccelerated.php | 67 + .../lib/php/lib/Protocol/TCompactProtocol.php | 739 + .../{Thrift => }/Protocol/TJSONProtocol.php | 100 +- .../Protocol/TMultiplexedProtocol.php | 0 .../lib/php/lib/Protocol/TProtocol.php | 350 + .../Protocol/TProtocolDecorator.php | 1 + .../Protocol/TSimpleJSONProtocol.php | 9 +- .../php/lib/Serializer/TBinarySerializer.php | 87 + .../lib/php/lib/Server/TForkingServer.php | 125 + .../lib/php/lib/Server/TSSLServerSocket.php | 97 + .../thrift.git/lib/php/lib/Server/TServer.php | 102 + .../lib/php/lib/Server/TServerSocket.php | 124 + .../lib/php/lib/Server/TServerTransport.php | 56 + .../lib/php/lib/Server/TSimpleServer.php | 60 + .../lib/php/lib/StoredMessageProtocol.php | 53 + .../php/lib/{Thrift => }/StringFunc/Core.php | 0 .../lib/{Thrift => }/StringFunc/Mbstring.php | 0 .../{Thrift => }/StringFunc/TStringFunc.php | 0 .../{Thrift => }/TMultiplexedProcessor.php | 29 +- 
.../lib/php/lib/Thrift/Base/TBase.php | 380 - .../Exception/TApplicationException.php | 76 - .../php/lib/Thrift/Exception/TException.php | 383 - .../lib/Thrift/Factory/TTransportFactory.php | 18 - .../lib/Thrift/Protocol/TBinaryProtocol.php | 453 - .../Protocol/TBinaryProtocolAccelerated.php | 65 - .../lib/Thrift/Protocol/TCompactProtocol.php | 739 - .../lib/php/lib/Thrift/Protocol/TProtocol.php | 352 - .../Thrift/Serializer/TBinarySerializer.php | 85 - .../php/lib/Thrift/Server/TForkingServer.php | 120 - .../lib/Thrift/Server/TSSLServerSocket.php | 94 - .../lib/php/lib/Thrift/Server/TServer.php | 100 - .../php/lib/Thrift/Server/TServerSocket.php | 122 - .../lib/Thrift/Server/TServerTransport.php | 56 - .../php/lib/Thrift/Server/TSimpleServer.php | 58 - .../Thrift/Transport/TBufferedTransport.php | 181 - .../php/lib/Thrift/Transport/TCurlClient.php | 249 - .../lib/Thrift/Transport/TFramedTransport.php | 193 - .../php/lib/Thrift/Transport/THttpClient.php | 229 - .../lib/Thrift/Transport/TMemoryBuffer.php | 100 - .../php/lib/Thrift/Transport/TPhpStream.php | 123 - .../php/lib/Thrift/Transport/TSSLSocket.php | 112 - .../lib/php/lib/Thrift/Transport/TSocket.php | 340 - .../php/lib/Thrift/Transport/TSocketPool.php | 300 - .../php/lib/Thrift/Transport/TTransport.php | 95 - .../php/lib/Transport/TBufferedTransport.php | 206 + .../lib/php/lib/Transport/TCurlClient.php | 248 + .../php/lib/Transport/TFramedTransport.php | 192 + .../lib/php/lib/Transport/THttpClient.php | 243 + .../lib/php/lib/Transport/TMemoryBuffer.php | 106 + .../{Thrift => }/Transport/TNullTransport.php | 29 +- .../lib/php/lib/Transport/TPhpStream.php | 124 + .../lib/php/lib/Transport/TSSLSocket.php | 117 + .../lib/php/lib/Transport/TSocket.php | 366 + .../lib/php/lib/Transport/TSocketPool.php | 310 + .../lib/php/lib/Transport/TTransport.php | 98 + .../php/lib/{Thrift => }/Type/TConstant.php | 8 +- .../lib/{Thrift => }/Type/TMessageType.php | 8 +- .../lib/php/lib/{Thrift => }/Type/TType.php | 34 +- 
.../thrift.git/lib/php/test/Fixtures.php | 194 + .../test/JsonSerialize/JsonSerializeTest.php | 116 + .../thrift.git/lib/php/test/Makefile.am | 38 +- .../BinarySerializerTest.php} | 48 +- .../test/Protocol/TJSONProtocolFixtures.php | 74 + .../php/test/Protocol/TJSONProtocolTest.php | 518 + .../Protocol/TSimpleJSONProtocolFixtures.php | 67 + .../test/Protocol/TSimpleJSONProtocolTest.php | 254 + .../lib/php/test/Test/Thrift/Fixtures.php | 194 - .../JsonSerialize/JsonSerializeTest.php | 116 - .../Thrift/Protocol/TestTJSONProtocol.php | 583 - .../Protocol/TestTSimpleJSONProtocol.php | 300 - .../php/test/Test/Thrift/TestValidators.php | 156 - .../php/test/Validator/BaseValidatorTest.php | 154 + .../test/Validator/ValidatorTest.php} | 31 +- .../test/Validator/ValidatorTestOop.php} | 31 +- .../git.apache.org/thrift.git/lib/py/setup.py | 7 +- .../lib/py/src/server/TNonblockingServer.py | 2 +- .../rb/lib/thrift/protocol/base_protocol.rb | 10 +- .../rb/lib/thrift/protocol/binary_protocol.rb | 9 +- .../protocol/binary_protocol_accelerated.rb | 8 + .../lib/thrift/protocol/compact_protocol.rb | 8 + .../rb/lib/thrift/protocol/json_protocol.rb | 8 + .../thrift/protocol/multiplexed_protocol.rb | 6 +- .../lib/rb/lib/thrift/server/base_server.rb | 10 +- .../lib/rb/lib/thrift/server/simple_server.rb | 6 +- .../lib/thrift/server/thread_pool_server.rb | 6 +- .../rb/lib/thrift/server/threaded_server.rb | 6 +- .../thrift/transport/base_server_transport.rb | 2 +- .../rb/lib/thrift/transport/base_transport.rb | 8 + .../thrift/transport/buffered_transport.rb | 10 +- .../lib/thrift/transport/framed_transport.rb | 10 +- .../thrift/transport/http_client_transport.rb | 4 + .../thrift/transport/io_stream_transport.rb | 5 +- .../transport/memory_buffer_transport.rb | 4 + .../rb/lib/thrift/transport/server_socket.rb | 7 +- .../lib/rb/lib/thrift/transport/socket.rb | 6 +- .../lib/thrift/transport/ssl_server_socket.rb | 4 + .../lib/rb/lib/thrift/transport/ssl_socket.rb | 4 + 
.../thrift/transport/unix_server_socket.rb | 6 +- .../rb/lib/thrift/transport/unix_socket.rb | 6 +- .../lib/rb/spec/base_protocol_spec.rb | 149 +- .../lib/rb/spec/base_transport_spec.rb | 272 +- .../spec/binary_protocol_accelerated_spec.rb | 8 +- .../lib/rb/spec/binary_protocol_spec.rb | 24 +- .../rb/spec/binary_protocol_spec_shared.rb | 143 +- .../thrift.git/lib/rb/spec/bytes_spec.rb | 76 +- .../thrift.git/lib/rb/spec/client_spec.rb | 83 +- .../lib/rb/spec/compact_protocol_spec.rb | 31 +- .../thrift.git/lib/rb/spec/exception_spec.rb | 108 +- .../thrift.git/lib/rb/spec/flat_spec.rb | 10 +- .../lib/rb/spec/http_client_spec.rb | 80 +- .../lib/rb/spec/json_protocol_spec.rb | 284 +- .../thrift.git/lib/rb/spec/namespaced_spec.rb | 10 +- .../lib/rb/spec/nonblocking_server_spec.rb | 32 +- .../thrift.git/lib/rb/spec/processor_spec.rb | 52 +- .../thrift.git/lib/rb/spec/serializer_spec.rb | 40 +- .../lib/rb/spec/server_socket_spec.rb | 49 +- .../thrift.git/lib/rb/spec/server_spec.rb | 144 +- .../thrift.git/lib/rb/spec/socket_spec.rb | 39 +- .../lib/rb/spec/socket_spec_shared.rb | 62 +- .../lib/rb/spec/ssl_server_socket_spec.rb | 34 + .../thrift.git/lib/rb/spec/ssl_socket_spec.rb | 48 +- .../rb/spec/struct_nested_containers_spec.rb | 48 +- .../thrift.git/lib/rb/spec/struct_spec.rb | 240 +- .../lib/rb/spec/thin_http_server_spec.rb | 36 +- .../thrift.git/lib/rb/spec/types_spec.rb | 105 +- .../thrift.git/lib/rb/spec/union_spec.rb | 84 +- .../lib/rb/spec/unix_socket_spec.rb | 77 +- .../thrift.git/lib/rb/thrift.gemspec | 21 +- .../thrift.git/lib/rs/Cargo.toml | 4 +- .../thrift.git/lib/rs/src/protocol/binary.rs | 9 +- .../thrift.git/lib/rs/src/protocol/compact.rs | 11 + .../lib/rs/src/transport/buffered.rs | 76 +- .../thrift.git/lib/rs/src/transport/framed.rs | 318 +- .../thrift.git/lib/rs/src/transport/socket.rs | 2 +- .../thrift.git/lib/st/package.xml | 2 +- .../thrift.git/package-lock.json | 2028 +++ vendor/git.apache.org/thrift.git/package.json | 18 +- 
.../git.apache.org/thrift.git/phpcs.xml.dist | 25 + .../thrift.git/sonar-project.properties | 8 +- .../test/DoubleConstantsTest.thrift | 17 + .../thrift.git/test/Makefile.am | 7 + .../thrift.git/test/ThriftTest.thrift | 2 +- .../thrift.git/test/cl/Makefile.am | 42 + .../thrift.git/test/cl/implementation.lisp | 136 + .../thrift.git/test/cl/make-test-client.lisp | 93 + .../thrift.git/test/cl/make-test-server.lisp | 80 + .../thrift.git/test/cl/tests.lisp | 240 + .../thrift.git/test/cpp/src/TestServer.cpp | 64 +- .../thrift.git/test/crossrunner/collect.py | 4 +- .../thrift.git/test/crossrunner/report.py | 13 +- .../thrift.git/test/crossrunner/run.py | 204 +- .../thrift.git/test/crossrunner/test.py | 10 +- .../thrift.git/test/crossrunner/util.py | 4 + .../test/csharp/Properties/AssemblyInfo.cs | 4 +- .../thrift.git/test/csharp/TestClient.cs | 4 +- .../thrift.git/test/csharp/TestServer.cs | 4 +- .../thrift.git/test/csharp/ThriftTest.csproj | 2 +- .../test/dart/test_client/pubspec.yaml | 2 +- .../test/erl/src/thrift_test.app.src | 2 +- .../test/features/known_failures_Linux.json | 94 +- .../thrift.git/test/go/Makefile.am | 4 - .../thrift.git/test/go/src/bin/stress/go17.go | 62 - .../thrift.git/test/go/src/bin/stress/main.go | 36 + .../test/go/src/bin/stress/pre_go17.go | 63 - .../test/go/src/bin/testclient/main.go | 2 + .../test/go/src/bin/testclient/pre_go17.go | 26 - .../test/go/src/common/clientserver_test.go | 46 +- .../test/go/src/common/context_test.go | 98 + .../thrift.git/test/go/src/common/go17.go | 26 - .../thrift.git/test/go/src/common/pre_go17.go | 26 - .../test/go/src/common/printing_handler.go | 11 +- .../go/src/common/printing_handler_go17.go | 386 - .../thrift.git/test/haxe/make_all.sh | 2 +- .../thrift.git/test/known_failures_Linux.json | 119 +- .../thrift.git/test/netcore/Client/.gitignore | 2 + .../Client.csproj} | 16 +- .../netcore/{ThriftTest => Client}/Program.cs | 14 +- .../Properties/AssemblyInfo.cs | 2 +- .../{ThriftTest => Client}/TestClient.cs | 
77 +- .../thrift.git/test/netcore/Makefile.am | 46 +- .../thrift.git/test/netcore/README.md | 3 + .../thrift.git/test/netcore/Server/.gitignore | 2 + .../thrift.git/test/netcore/Server/Program.cs | 72 + .../netcore/Server/Properties/AssemblyInfo.cs | 43 + .../test/netcore/Server/Server.csproj | 31 + .../{ThriftTest => Server}/TestServer.cs | 82 +- .../thrift.git/test/netcore/ThriftTest.sln | 44 +- .../ThriftTest/Properties/launchSettings.json | 7 - .../thrift.git/test/netcore/build.cmd | 5 - .../thrift.git/test/netcore/build.sh | 5 - .../thrift.git/test/perl/TestServer.pl | 8 + .../thrift.git/test/php/Makefile.am | 6 +- .../php/{TestPsr4.php => TestClassmap.php} | 2 +- .../thrift.git/test/php/TestClient.php | 15 +- .../thrift.git/test/py/Makefile.am | 7 + .../thrift.git/test/py/RunClientServer.py | 1 + .../test/py/TestRenderedDoubleConstants.py | 177 + .../thrift.git/test/py/generate.cmake | 7 + .../git.apache.org/thrift.git/test/rb/Gemfile | 6 +- .../test/rb/integration/TestClient.rb | 42 +- .../test/rb/integration/TestServer.rb | 57 +- vendor/git.apache.org/thrift.git/test/test.py | 21 +- .../git.apache.org/thrift.git/test/tests.json | 75 +- .../thrift.git/tutorial/Makefile.am | 4 + .../thrift.git/tutorial/cl/Makefile.am | 47 + .../tutorial/cl/make-tutorial-client.lisp | 51 + .../tutorial/cl/make-tutorial-server.lisp | 29 + .../tutorial/cl/shared-implementation.lisp | 25 + .../tutorial/cl/thrift-tutorial.asd | 17 + .../tutorial/cl/tutorial-implementation.lisp | 41 + .../CsharpClient/Properties/AssemblyInfo.cs | 4 +- .../CsharpServer/Properties/AssemblyInfo.cs | 4 +- .../tutorial/dart/client/pubspec.yaml | 4 +- .../tutorial/dart/console_client/pubspec.yaml | 2 +- .../tutorial/dart/server/pubspec.yaml | 2 +- .../delphi/DelphiClient/DelphiClient.dproj | 4 +- .../delphi/DelphiServer/DelphiServer.dproj | 4 +- .../thrift.git/tutorial/go/Makefile.am | 10 - .../thrift.git/tutorial/go/src/client.go | 3 + .../thrift.git/tutorial/go/src/handler.go | 5 +- 
.../tutorial/go/src/handler_go17.go | 104 - .../thrift.git/tutorial/go/src/pre_go17.go | 26 - .../tutorial/hs/ThriftTutorial.cabal | 2 +- .../thrift.git/tutorial/java/build.xml | 12 +- .../thrift.git/tutorial/js/build.xml | 12 +- .../tutorial/netcore/Client/Program.cs | 20 +- .../tutorial/netcore/Interfaces/.gitignore | 3 + .../netcore/Interfaces/Interfaces.csproj | 10 + .../thrift.git/tutorial/netcore/Makefile.am | 49 +- .../thrift.git/tutorial/netcore/README.md | 27 +- .../tutorial/netcore/Server/Program.cs | 27 +- .../thrift.git/tutorial/netcore/Tutorial.sln | 78 +- .../thrift.git/tutorial/netcore/build.cmd | 5 - .../thrift.git/tutorial/netcore/build.sh | 5 - .../thrift.git/tutorial/ocaml/_oasis | 2 +- .../thrift.git/tutorial/php/PhpClient.php | 2 +- .../thrift.git/tutorial/php/PhpServer.php | 2 +- .../thrift.git/tutorial/shared.thrift | 1 + .../thrift.git/tutorial/tutorial.thrift | 2 + .../govalidator/.github/ISSUE_TEMPLATE.md | 2 + .../asaskevich/govalidator/CONTRIBUTING.md | 39 +- .../asaskevich/govalidator/README.md | 43 +- .../asaskevich/govalidator/converter.go | 29 +- .../asaskevich/govalidator/numerics.go | 11 +- .../asaskevich/govalidator/numerics_test.go | 206 +- .../asaskevich/govalidator/patterns.go | 11 +- .../asaskevich/govalidator/utils.go | 4 +- .../govalidator/utils_benchmark_test.go | 17 + .../asaskevich/govalidator/utils_test.go | 3 +- .../asaskevich/govalidator/validator.go | 60 +- .../asaskevich/govalidator/validator_test.go | 196 +- .../beorn7/perks/quantile/stream.go | 34 +- .../containerd/continuity/fs/fstest/file.go | 13 + .../continuity/fs/fstest/testsuite.go | 1 + .../docker/docker/.github/CODEOWNERS | 9 +- vendor/github.com/docker/docker/Dockerfile | 32 +- vendor/github.com/docker/docker/MAINTAINERS | 19 +- vendor/github.com/docker/docker/TESTING.md | 4 +- .../api/server/middleware/debug_test.go | 5 +- .../api/server/middleware/version_test.go | 21 +- .../docker/api/types/filters/parse_test.go | 15 +- 
.../builder/dockerfile/buildargs_test.go | 24 +- .../docker/builder/dockerfile/builder.go | 15 +- .../docker/builder/dockerfile/builder_test.go | 9 +- .../docker/builder/dockerfile/copy_test.go | 9 +- .../docker/builder/dockerfile/dispatchers.go | 55 +- .../builder/dockerfile/dispatchers_test.go | 154 +- .../docker/builder/dockerfile/evaluator.go | 3 +- .../docker/builder/dockerfile/imagecontext.go | 12 +- .../dockerfile/instructions/commands.go | 2 + .../builder/dockerfile/instructions/parse.go | 4 +- .../dockerfile/instructions/parse_test.go | 29 +- .../docker/builder/dockerfile/internals.go | 10 +- .../dockerfile/internals_linux_test.go | 10 +- .../builder/dockerfile/internals_test.go | 16 +- .../dockerfile/internals_windows_test.go | 7 +- .../dockerfile/parser/line_parsers_test.go | 19 +- .../builder/dockerfile/parser/parser.go | 57 +- .../builder/dockerfile/parser/parser_test.go | 48 +- .../builder/dockerfile/shell/lex_test.go | 13 +- .../docker/builder/fscache/fscache_test.go | 75 +- .../remotecontext/git/gitutils_test.go | 107 +- .../builder/remotecontext/mimetype_test.go | 8 +- .../builder/remotecontext/remote_test.go | 18 +- .../docker/docker/client/client_test.go | 51 +- .../docker/client/config_create_test.go | 5 +- .../docker/client/config_inspect_test.go | 5 +- .../docker/docker/client/config_list_test.go | 5 +- .../docker/client/config_remove_test.go | 5 +- .../docker/client/config_update_test.go | 5 +- .../docker/client/container_prune_test.go | 13 +- .../docker/client/container_remove_test.go | 11 +- .../client/distribution_inspect_test.go | 5 +- .../github.com/docker/docker/client/hijack.go | 29 +- .../docker/docker/client/image_prune_test.go | 13 +- .../docker/docker/client/image_remove_test.go | 9 +- .../docker/client/network_inspect_test.go | 11 +- .../docker/client/network_prune_test.go | 9 +- .../docker/docker/client/ping_test.go | 27 +- .../docker/docker/client/request_test.go | 6 +- .../docker/client/secret_create_test.go | 5 +- 
.../docker/client/secret_inspect_test.go | 5 +- .../docker/docker/client/secret_list_test.go | 5 +- .../docker/client/secret_remove_test.go | 5 +- .../docker/client/secret_update_test.go | 5 +- .../docker/client/service_create_test.go | 11 +- .../docker/client/service_remove_test.go | 9 +- .../client/swarm_get_unlock_key_test.go | 8 +- .../docker/client/volume_inspect_test.go | 10 +- .../docker/cmd/dockerd/config_unix_test.go | 9 +- .../docker/docker/cmd/dockerd/daemon_test.go | 82 +- .../docker/docker/cmd/dockerd/daemon_unix.go | 4 - .../docker/cmd/dockerd/daemon_unix_test.go | 56 +- .../docker/docker/cmd/dockerd/docker.go | 44 +- .../docker/docker/cmd/dockerd/docker_unix.go | 8 + .../docker/cmd/dockerd/docker_windows.go | 33 + .../docker/docker/cmd/dockerd/options_test.go | 19 +- .../docker/cmd/dockerd/service_unsupported.go | 4 - .../docker/docker/container/archive.go | 7 + .../docker/docker/container/container.go | 3 + .../docker/container/container_unit_test.go | 18 +- .../docker/docker/container/view_test.go | 51 +- .../daemon/cluster/convert/service_test.go | 8 +- .../executor/container/container_test.go | 4 +- .../docker/daemon/cluster/noderunner.go | 7 +- .../docker/daemon/config/config_test.go | 19 +- .../docker/daemon/config/config_unix_test.go | 24 +- .../daemon/config/config_windows_test.go | 12 +- .../docker/daemon/container_unix_test.go | 6 +- .../docker/docker/daemon/create_test.go | 4 +- .../docker/docker/daemon/daemon_linux_test.go | 12 +- .../docker/docker/daemon/daemon_test.go | 7 +- .../docker/docker/daemon/daemon_unix_test.go | 6 +- .../docker/docker/daemon/delete_test.go | 4 +- .../docker/daemon/discovery/discovery_test.go | 20 +- .../github.com/docker/docker/daemon/exec.go | 2 +- .../docker/docker/daemon/exec_linux.go | 3 + .../docker/docker/daemon/exec_linux_test.go | 53 + .../github.com/docker/docker/daemon/export.go | 4 +- .../daemon/graphdriver/aufs/aufs_test.go | 38 +- .../daemon/graphdriver/copy/copy_test.go | 78 +- 
.../docker/daemon/graphdriver/driver_test.go | 19 +- .../graphdriver/graphtest/graphbench_unix.go | 4 +- .../graphdriver/graphtest/graphtest_unix.go | 28 +- .../graphdriver/graphtest/testutil_unix.go | 36 +- .../graphdriver/quota/projectquota_test.go | 42 +- .../docker/daemon/images/image_history.go | 5 +- .../docker/docker/daemon/images/images.go | 4 +- .../docker/docker/daemon/images/service.go | 2 +- .../docker/docker/daemon/info_unix_test.go | 9 +- .../docker/docker/daemon/inspect_test.go | 8 +- .../docker/daemon/logger/adapter_test.go | 25 +- .../daemon/logger/awslogs/cloudwatchlogs.go | 2 + .../logger/awslogs/cloudwatchlogs_test.go | 91 +- .../daemon/logger/jsonfilelog/jsonfilelog.go | 18 +- .../logger/jsonfilelog/jsonfilelog_test.go | 86 +- .../jsonfilelog/jsonlog/jsonlogbytes_test.go | 19 +- .../jsonlog/time_marshalling_test.go | 14 +- .../daemon/logger/jsonfilelog/read_test.go | 6 +- .../daemon/logger/loggerutils/logfile.go | 289 +- .../daemon/logger/splunk/splunk_test.go | 18 +- .../daemon/logger/templates/templates_test.go | 9 +- .../docker/docker/daemon/network.go | 2 + .../docker/docker/daemon/oci_linux.go | 11 +- .../docker/docker/daemon/oci_linux_test.go | 16 +- .../docker/docker/daemon/oci_windows.go | 4 + .../docker/docker/daemon/reload_test.go | 5 +- .../docker/docker/daemon/stats/collector.go | 6 +- .../docker/docker/daemon/trustkey_test.go | 30 +- .../metadata/v1_id_service_test.go | 4 +- .../docker/docker/distribution/push_v2.go | 20 +- .../docker/distribution/push_v2_test.go | 157 + .../hack/dockerfile/install/proxy.installer | 2 +- .../docker/docker/hack/test/e2e-run.sh | 6 +- .../github.com/docker/docker/image/fs_test.go | 73 +- .../docker/docker/image/image_test.go | 43 +- .../docker/docker/image/spec/README.md | 46 + .../github.com/docker/docker/image/store.go | 7 + .../docker/docker/image/store_test.go | 129 +- .../docker/integration-cli/check_test.go | 69 - .../cli/build/fakegit/fakegit.go | 4 +- .../cli/build/fakestorage/storage.go | 4 
+- .../docker/docker/integration-cli/cli/cli.go | 2 + .../docker/integration-cli/daemon/daemon.go | 18 +- .../integration-cli/daemon/daemon_swarm.go | 10 +- .../integration-cli/docker_api_build_test.go | 110 +- .../docker_api_containers_test.go | 34 +- .../docker_api_containers_windows_test.go | 12 +- .../docker_api_inspect_test.go | 7 +- .../integration-cli/docker_api_swarm_test.go | 16 +- .../integration-cli/docker_cli_build_test.go | 156 - .../docker_cli_by_digest_test.go | 5 +- .../integration-cli/docker_cli_create_test.go | 71 - .../docker_cli_plugins_test.go | 46 - .../docker_cli_pull_trusted_test.go | 222 - .../integration-cli/docker_cli_push_test.go | 222 - .../integration-cli/docker_cli_run_test.go | 71 +- .../integration-cli/docker_cli_swarm_test.go | 72 - .../integration-cli/docker_utils_test.go | 13 - .../integration-cli/requirements_test.go | 16 - .../integration-cli/trust_server_test.go | 334 - .../docker/integration/build/build_test.go | 76 +- .../docker/integration/config/config_test.go | 74 +- .../docker/integration/container/copy_test.go | 11 +- .../container/daemon_linux_test.go | 22 +- .../docker/integration/container/diff_test.go | 7 +- .../docker/integration/container/exec_test.go | 15 +- .../integration/container/export_test.go | 45 +- .../integration/container/inspect_test.go | 14 +- .../docker/integration/container/kill_test.go | 28 +- .../integration/container/links_linux_test.go | 34 +- .../docker/integration/container/logs_test.go | 6 +- .../container/mounts_linux_test.go | 16 +- .../docker/integration/container/nat_test.go | 36 +- .../integration/container/pause_test.go | 20 +- .../docker/integration/container/ps_test.go | 23 +- .../integration/container/remove_test.go | 20 +- .../integration/container/rename_test.go | 114 +- .../integration/container/resize_test.go | 10 +- .../integration/container/stats_test.go | 14 +- .../docker/integration/container/stop_test.go | 12 +- .../container/update_linux_test.go | 40 +- 
.../integration/container/update_test.go | 12 +- .../docker/integration/image/commit_test.go | 16 +- .../docker/integration/image/remove_test.go | 22 +- .../internal/container/container.go | 6 +- .../integration/internal/request/client.go | 8 +- .../integration/internal/swarm/service.go | 16 +- .../docker/integration/network/delete_test.go | 30 +- .../integration/network/inspect_test.go | 75 +- .../integration/network/service_test.go | 123 +- .../plugin/authz/authz_plugin_test.go | 110 +- .../plugin/authz/authz_plugin_v2_test.go | 52 +- .../integration/plugin/authz/main_test.go | 2 + .../plugin/logging/helpers_test.go | 2 - .../integration/plugin/logging/main_test.go | 31 + .../plugin/logging/validation_test.go | 8 +- .../docker/integration/secret/secret_test.go | 78 +- .../docker/integration/service/create_test.go | 82 +- .../integration/service/inspect_test.go | 50 +- .../integration/service/network_test.go | 28 +- .../integration/session/session_test.go | 28 +- .../system/cgroupdriver_systemd_test.go | 64 + .../docker/integration/system/event_test.go | 34 +- .../integration/system/info_linux_test.go | 37 +- .../docker/integration/system/info_test.go | 8 +- .../docker/integration/system/login_test.go | 5 +- .../docker/integration/system/version_test.go | 16 +- .../docker/integration/volume/volume_test.go | 34 +- .../docker/internal/test/environment/clean.go | 29 +- .../internal/test/environment/environment.go | 9 + .../internal/test/environment/protect.go | 22 +- .../docker/internal/testutil/helpers.go | 16 +- .../internal/testutil/stringutils_test.go | 9 +- .../docker/docker/libcontainerd/queue_test.go | 8 +- .../docker/libcontainerd/remote_daemon.go | 12 +- .../docker/docker/opts/quotedstring_test.go | 17 +- .../docker/pkg/archive/archive_linux_test.go | 44 +- .../docker/docker/pkg/archive/archive_test.go | 59 +- .../docker/pkg/archive/archive_unix_test.go | 80 +- .../docker/docker/pkg/archive/changes_test.go | 106 +- .../docker/pkg/archive/copy_unix_test.go | 
46 +- .../docker/docker/pkg/archive/wrap_test.go | 10 +- .../docker/pkg/authorization/api_test.go | 21 +- .../pkg/authorization/middleware_test.go | 20 +- .../pkg/authorization/middleware_unix_test.go | 9 +- .../docker/pkg/fileutils/fileutils_test.go | 8 +- .../docker/pkg/idtools/idtools_unix_test.go | 82 +- .../docker/docker/pkg/ioutils/readers_test.go | 5 +- .../docker/pkg/jsonmessage/jsonmessage.go | 34 +- .../pkg/jsonmessage/jsonmessage_test.go | 157 +- .../pkg/parsers/kernel/kernel_windows.go | 35 +- .../operatingsystem_windows.go | 2 +- .../docker/docker/pkg/plugins/client_test.go | 14 +- .../docker/pkg/plugins/discovery_unix_test.go | 4 +- .../docker/docker/pkg/plugins/plugin_test.go | 4 +- .../docker/pkg/plugins/transport/http_test.go | 5 +- .../docker/docker/pkg/pools/pools_test.go | 12 +- .../docker/docker/pkg/reexec/reexec_test.go | 17 +- .../docker/pkg/signal/signal_linux_test.go | 9 +- .../docker/docker/pkg/signal/signal_test.go | 15 +- .../docker/pkg/signal/trap_linux_test.go | 24 +- .../streamformatter/streamformatter_test.go | 67 +- .../pkg/streamformatter/streamwriter_test.go | 16 +- .../docker/pkg/sysinfo/sysinfo_linux_test.go | 22 +- .../docker/pkg/system/stat_unix_test.go | 6 +- .../docker/docker/pkg/tarsum/tarsum_test.go | 16 +- .../docker/docker/pkg/term/ascii_test.go | 18 +- .../docker/docker/pkg/term/proxy_test.go | 69 +- .../docker/docker/pkg/term/term_linux_test.go | 70 +- .../docker/docker/plugin/manager_linux.go | 1 + .../docker/plugin/manager_linux_test.go | 58 + .../docker/docker/profiles/seccomp/seccomp.go | 4 +- .../docker/docker/reference/store_test.go | 20 +- .../docker/docker/registry/config.go | 3 - .../docker/docker/registry/config_test.go | 13 +- .../docker/docker/registry/registry_test.go | 6 +- .../resumable/resumablerequestreader_test.go | 56 +- .../docker/docker/runconfig/config_test.go | 16 +- .../docker/runconfig/hostconfig_test.go | 17 +- vendor/github.com/docker/docker/vendor.conf | 10 +- 
.../docker/docker/volume/store/db.go | 11 +- .../docker/docker/volume/store/db_test.go | 52 + .../docker/volume/store/restore_test.go | 55 + .../docker/docker/volume/store/store.go | 15 +- .../docker/docker/volume/store/store_test.go | 106 + .../github.com/fnproject/fdk-go/fdk_test.go | 26 +- .../fnproject/fdk-go/utils/utils.go | 5 +- .../go-openapi/jsonpointer/.travis.yml | 4 +- .../go-openapi/jsonreference/.travis.yml | 4 +- .../jsonreference/reference_test.go | 2 +- .../go-openapi/runtime/client/keepalive.go | 53 + .../runtime/client/keepalive_test.go | 73 + .../go-openapi/runtime/client/request.go | 106 +- .../go-openapi/runtime/client/request_test.go | 23 + .../go-openapi/runtime/client/runtime.go | 28 + vendor/github.com/go-openapi/spec/expander.go | 45 + .../go-openapi/spec/expander_test.go | 19 + .../spec/fixtures/specs/todos.common.json | 103 + .../go-openapi/spec/fixtures/specs/todos.json | 346 + vendor/github.com/go-openapi/strfmt/bson.go | 10 +- vendor/github.com/lib/pq/.travis.sh | 12 - vendor/github.com/lib/pq/.travis.yml | 15 +- vendor/github.com/lib/pq/bench_test.go | 5 +- vendor/github.com/lib/pq/conn_test.go | 9 +- vendor/github.com/lib/pq/copy_test.go | 15 +- vendor/github.com/lib/pq/encode_test.go | 27 +- vendor/github.com/lib/pq/error.go | 3 +- vendor/github.com/lib/pq/go18_test.go | 4 +- vendor/github.com/lib/pq/notify.go | 2 +- .../github.com/mailru/easyjson/gen/decoder.go | 2 +- .../github.com/mailru/easyjson/gen/encoder.go | 2 +- .../mailru/easyjson/jlexer/lexer.go | 7 +- .../mailru/easyjson/jlexer/lexer_test.go | 21 +- .../mailru/easyjson/tests/basic_test.go | 1 + .../github.com/mailru/easyjson/tests/data.go | 18 + .../prometheus/common/route/route.go | 7 +- .../github.com/prometheus/procfs/nfs/nfs.go | 20 +- .../github.com/prometheus/procfs/nfs/parse.go | 16 +- .../prometheus/procfs/nfs/parse_nfs_test.go | 32 +- vendor/go.opencensus.io/.travis.yml | 3 +- vendor/go.opencensus.io/README.md | 139 +- 
.../go.opencensus.io/examples/grpc/README.md | 4 +- .../examples/grpc/helloworld_server/main.go | 13 +- .../examples/helloworld/main.go | 7 +- .../go.opencensus.io/exporter/jaeger/agent.go | 89 + .../jaeger/example}/main.go | 0 .../exporter/jaeger/example_test.go | 15 +- .../jaeger/internal/gen-go/jaeger/agent.go | 244 + .../jaeger/internal/gen-go/jaeger/jaeger.go | 8 +- .../exporter/jaeger/jaeger.go | 98 +- .../exporter/jaeger/jaeger_test.go | 84 + .../prometheus/example}/main.go | 9 +- .../exporter/prometheus/prometheus.go | 16 +- .../exporter/prometheus/prometheus_test.go | 16 +- .../exporter/stackdriver/example_test.go | 10 +- .../stackdriver/examples/stats}/main.go | 7 +- .../exporter/stackdriver/stackdriver.go | 4 + .../exporter/stackdriver/stackdriver_test.go | 26 + .../exporter/stackdriver/stats.go | 56 +- .../exporter/stackdriver/stats_test.go | 28 +- .../exporter/stackdriver/trace.go | 2 +- .../exporter/stackdriver/trace_proto.go | 10 +- .../exporter/stackdriver/trace_proto_test.go | 4 +- .../exporter/zipkin/example/main.go | 75 + .../exporter/zipkin/zipkin.go | 18 +- .../exporter/zipkin/zipkin_test.go | 1 + vendor/go.opencensus.io/internal/internal.go | 2 +- .../internal/readme/source.md | 78 +- .../go.opencensus.io/internal/readme/stats.go | 38 +- .../go.opencensus.io/internal/readme/tags.go | 27 +- .../ocgrpc => }/internal/testpb/generate.sh | 0 .../go.opencensus.io/internal/testpb/impl.go | 93 + .../ocgrpc => }/internal/testpb/test.pb.go | 28 +- .../ocgrpc => }/internal/testpb/test.proto | 1 + .../go.opencensus.io/plugin/ocgrpc/client.go | 32 +- .../plugin/ocgrpc/client_metrics.go | 22 +- .../plugin/ocgrpc/client_metrics_test.go | 14 +- .../plugin/ocgrpc/client_stats_handler.go | 32 +- .../ocgrpc/client_stats_handler_test.go | 7 +- vendor/go.opencensus.io/plugin/ocgrpc/doc.go | 2 + .../plugin/ocgrpc/example_test.go | 6 +- .../plugin/ocgrpc/grpc_test.go | 88 +- .../go.opencensus.io/plugin/ocgrpc/server.go | 55 +- .../plugin/ocgrpc/server_metrics.go | 
22 +- .../plugin/ocgrpc/server_stats_handler.go | 47 +- .../ocgrpc/server_stats_handler_test.go | 11 +- .../plugin/ocgrpc/stats_common.go | 27 +- .../plugin/ocgrpc/trace_common.go | 60 +- .../plugin/ocgrpc/trace_common_test.go | 261 +- .../plugin/ocgrpc/trace_test.go | 233 + .../go.opencensus.io/plugin/ochttp/client.go | 34 +- .../plugin/ochttp/client_test.go | 32 +- .../plugin/ochttp/example_test.go | 10 +- .../ochttp/propagation/google/google.go | 89 - .../ochttp/propagation/google/google_test.go | 71 - .../go.opencensus.io/plugin/ochttp/server.go | 64 +- .../plugin/ochttp/server_test.go | 5 +- .../go.opencensus.io/plugin/ochttp/stats.go | 51 +- .../go.opencensus.io/plugin/ochttp/trace.go | 117 +- .../plugin/ochttp/trace_test.go | 323 +- .../go.opencensus.io/stats/internal/record.go | 7 +- vendor/go.opencensus.io/stats/measure.go | 22 +- .../go.opencensus.io/stats/measure_float64.go | 11 + .../go.opencensus.io/stats/measure_int64.go | 11 + vendor/go.opencensus.io/stats/record.go | 21 + .../stats/view/aggregation.go | 109 +- .../stats/view/aggregation_data.go | 10 +- .../stats/view/aggtype_string.go | 16 + .../stats/view/benchmark_test.go | 2 +- .../go.opencensus.io/stats/view/collector.go | 2 +- .../stats/view/example_test.go | 7 +- vendor/go.opencensus.io/stats/view/view.go | 37 +- .../go.opencensus.io/stats/view/view_test.go | 40 +- vendor/go.opencensus.io/stats/view/worker.go | 7 + .../stats/view/worker_commands.go | 5 + .../stats/view/worker_test.go | 16 +- vendor/go.opencensus.io/trace/basetypes.go | 36 +- .../go.opencensus.io/trace/benchmark_test.go | 112 +- vendor/go.opencensus.io/trace/export.go | 1 + vendor/go.opencensus.io/trace/trace.go | 29 +- vendor/go.opencensus.io/trace/trace_test.go | 88 +- vendor/go.opencensus.io/zpages/rpcz.go | 58 +- vendor/go.opencensus.io/zpages/rpcz_test.go | 55 + vendor/go.opencensus.io/zpages/tracez.go | 52 + vendor/golang.org/x/crypto/CONTRIBUTING.md | 15 +- .../x/crypto/acme/autocert/renewal.go | 29 +- 
.../x/crypto/acme/autocert/renewal_test.go | 146 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 13 +- .../x/crypto/ed25519/ed25519_test.go | 24 + .../internal/edwards25519/edwards25519.go | 22 + vendor/golang.org/x/crypto/nacl/sign/sign.go | 83 + .../x/crypto/nacl/sign/sign_test.go | 74 + .../x/crypto/openpgp/packet/encrypted_key.go | 9 +- .../openpgp/packet/encrypted_key_test.go | 63 +- .../x/crypto/openpgp/packet/packet.go | 32 +- .../x/crypto/openpgp/packet/public_key.go | 11 +- .../crypto/openpgp/packet/public_key_test.go | 26 + vendor/golang.org/x/crypto/xtea/block.go | 2 +- vendor/golang.org/x/crypto/xtea/cipher.go | 4 +- vendor/golang.org/x/net/CONTRIBUTING.md | 15 +- .../x/net/dns/dnsmessage/message.go | 74 +- .../x/net/dns/dnsmessage/message_test.go | 175 +- vendor/golang.org/x/net/icmp/diag_test.go | 274 + vendor/golang.org/x/net/icmp/dstunreach.go | 8 +- vendor/golang.org/x/net/icmp/echo.go | 114 +- vendor/golang.org/x/net/icmp/extension.go | 43 +- .../golang.org/x/net/icmp/extension_test.go | 530 +- vendor/golang.org/x/net/icmp/interface.go | 100 +- vendor/golang.org/x/net/icmp/ipv4_test.go | 118 +- vendor/golang.org/x/net/icmp/message.go | 17 +- vendor/golang.org/x/net/icmp/message_test.go | 245 +- vendor/golang.org/x/net/icmp/multipart.go | 38 +- .../golang.org/x/net/icmp/multipart_test.go | 867 +- vendor/golang.org/x/net/icmp/packettoobig.go | 2 +- vendor/golang.org/x/net/icmp/paramprob.go | 8 +- vendor/golang.org/x/net/icmp/ping_test.go | 200 - vendor/golang.org/x/net/icmp/timeexceeded.go | 8 +- .../golang.org/x/net/internal/iana/const.go | 51 +- vendor/golang.org/x/net/internal/iana/gen.go | 96 +- .../x/net/internal/socket/zsys_netbsd_arm.go | 6 + vendor/golang.org/x/net/ipv4/gen.go | 2 +- vendor/golang.org/x/net/ipv4/iana.go | 10 +- vendor/golang.org/x/net/ipv6/gen.go | 2 +- vendor/golang.org/x/net/ipv6/iana.go | 10 +- vendor/golang.org/x/sync/CONTRIBUTING.md | 15 +- vendor/golang.org/x/sys/CONTRIBUTING.md | 15 +- 
vendor/golang.org/x/sys/plan9/syscall.go | 3 + vendor/golang.org/x/sys/unix/example_test.go | 19 + vendor/golang.org/x/sys/unix/syscall.go | 11 +- .../golang.org/x/sys/unix/syscall_darwin.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 2 + .../golang.org/x/sys/unix/syscall_freebsd.go | 15 +- .../x/sys/unix/syscall_linux_arm64.go | 1 + .../x/sys/unix/syscall_linux_mips64x.go | 1 + .../x/sys/unix/syscall_linux_mipsx.go | 1 + .../x/sys/unix/syscall_linux_ppc64x.go | 1 + .../x/sys/unix/syscall_linux_sparc64.go | 1 + .../x/sys/unix/syscall_linux_test.go | 142 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 4 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 2 + .../golang.org/x/sys/unix/syscall_solaris.go | 3 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 23 + .../x/sys/unix/syscall_unix_test.go | 129 + vendor/golang.org/x/sys/unix/types_netbsd.go | 11 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 3 + .../x/sys/unix/zsyscall_darwin_386.go | 15 + .../x/sys/unix/zsyscall_darwin_amd64.go | 15 + .../x/sys/unix/zsyscall_darwin_arm.go | 15 + .../x/sys/unix/zsyscall_darwin_arm64.go | 15 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 30 + .../x/sys/unix/zsyscall_freebsd_386.go | 15 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 15 + .../x/sys/unix/zsyscall_freebsd_arm.go | 15 + .../x/sys/unix/zsyscall_linux_arm64.go | 10 + .../x/sys/unix/zsyscall_linux_mips.go | 10 + .../x/sys/unix/zsyscall_linux_mips64.go | 10 + .../x/sys/unix/zsyscall_linux_mips64le.go | 10 + .../x/sys/unix/zsyscall_linux_mipsle.go | 10 + .../x/sys/unix/zsyscall_linux_ppc64.go | 10 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 10 + .../x/sys/unix/zsyscall_linux_sparc64.go | 10 + .../x/sys/unix/zsyscall_netbsd_386.go | 40 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 40 + .../x/sys/unix/zsyscall_netbsd_arm.go | 40 + .../x/sys/unix/zsyscall_openbsd_386.go | 30 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 30 + .../x/sys/unix/zsyscall_openbsd_arm.go | 30 + .../x/sys/unix/zsyscall_solaris_amd64.go | 16 + 
.../x/sys/unix/ztypes_darwin_386.go | 112 +- .../x/sys/unix/ztypes_darwin_amd64.go | 158 +- .../x/sys/unix/ztypes_darwin_arm.go | 112 +- .../x/sys/unix/ztypes_darwin_arm64.go | 158 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 100 +- .../x/sys/unix/ztypes_linux_sparc64.go | 208 +- .../x/sys/unix/ztypes_netbsd_386.go | 9 + .../x/sys/unix/ztypes_netbsd_amd64.go | 9 + .../x/sys/unix/ztypes_netbsd_arm.go | 9 + .../x/sys/unix/ztypes_solaris_amd64.go | 174 +- vendor/golang.org/x/sys/windows/syscall.go | 3 + .../api/admin/directory/v1/admin-api.json | 47 +- .../api/admin/directory/v1/admin-gen.go | 97 +- .../api/analytics/v3/analytics-api.json | 6 +- .../api/analytics/v3/analytics-gen.go | 4 +- .../v1/androiddeviceprovisioning-api.json | 128 +- .../v1/androiddeviceprovisioning-gen.go | 224 +- .../v1/androidmanagement-api.json | 24 +- .../v1/androidmanagement-gen.go | 19 + .../v2/androidpublisher-api.json | 50 +- .../v2/androidpublisher-gen.go | 51 +- vendor/google.golang.org/api/api-list.json | 140 +- .../api/appengine/v1/appengine-api.json | 8 +- .../api/appengine/v1/appengine-gen.go | 16 +- .../api/appengine/v1alpha/appengine-api.json | 6 +- .../api/appengine/v1alpha/appengine-gen.go | 14 +- .../api/appengine/v1beta/appengine-api.json | 8 +- .../api/appengine/v1beta/appengine-gen.go | 16 +- .../api/appengine/v1beta4/appengine-api.json | 6 +- .../api/appengine/v1beta4/appengine-gen.go | 14 +- .../api/appengine/v1beta5/appengine-api.json | 6 +- .../api/appengine/v1beta5/appengine-gen.go | 14 +- .../api/bigquery/v2/bigquery-api.json | 9 +- .../api/bigquery/v2/bigquery-gen.go | 4 + .../v1/bigquerydatatransfer-api.json | 10 +- .../v1/bigquerydatatransfer-gen.go | 13 +- .../api/chat/v1/chat-api.json | 4 +- .../api/cloudbuild/v1/cloudbuild-api.json | 6 +- .../api/cloudbuild/v1/cloudbuild-gen.go | 15 +- .../api/cloudiot/v1/cloudiot-api.json | 4 +- .../api/cloudiot/v1/cloudiot-gen.go | 5 +- .../api/cloudkms/v1/cloudkms-api.json | 4 +- .../api/cloudkms/v1/cloudkms-gen.go | 4 +- 
.../v1/cloudresourcemanager-api.json | 8 +- .../v1/cloudresourcemanager-gen.go | 11 +- .../v1beta1/cloudresourcemanager-api.json | 8 +- .../v1beta1/cloudresourcemanager-gen.go | 11 +- .../v2/cloudresourcemanager-api.json | 848 + .../v2/cloudresourcemanager-gen.go | 2826 ++++ .../v2beta1/cloudresourcemanager-api.json | 6 +- .../v2beta1/cloudresourcemanager-gen.go | 2 +- .../api/cloudtrace/v1/cloudtrace-api.json | 4 +- .../api/cloudtrace/v1/cloudtrace-gen.go | 2 + .../api/compute/v0.alpha/compute-api.json | 218 +- .../api/compute/v0.alpha/compute-gen.go | 3838 ++--- .../api/compute/v0.beta/compute-api.json | 331 +- .../api/compute/v0.beta/compute-gen.go | 3553 ++--- .../api/compute/v1/compute-api.json | 148 +- .../api/compute/v1/compute-gen.go | 2983 ++-- .../api/container/v1/container-api.json | 1576 +- .../api/container/v1/container-gen.go | 5665 ++++++- .../api/container/v1beta1/container-api.json | 24 +- .../api/container/v1beta1/container-gen.go | 92 +- .../api/content/v2/content-api.json | 912 +- .../api/content/v2/content-gen.go | 2044 ++- .../api/content/v2sandbox/content-api.json | 6 +- .../api/content/v2sandbox/content-gen.go | 4 +- .../api/dataflow/v1b3/dataflow-api.json | 34 +- .../api/dataflow/v1b3/dataflow-gen.go | 40 + .../api/dataproc/v1/dataproc-api.json | 6 +- .../api/dataproc/v1/dataproc-gen.go | 2 +- .../api/dataproc/v1beta2/dataproc-api.json | 23 +- .../api/dataproc/v1beta2/dataproc-gen.go | 29 +- .../api/datastore/v1/datastore-api.json | 267 +- .../api/datastore/v1/datastore-gen.go | 714 + .../api/datastore/v1beta1/datastore-api.json | 167 +- .../api/datastore/v1beta1/datastore-gen.go | 291 + .../api/datastore/v1beta3/datastore-api.json | 167 +- .../api/datastore/v1beta3/datastore-gen.go | 291 + .../v0.alpha/deploymentmanager-api.json | 7 +- .../v0.alpha/deploymentmanager-gen.go | 6 +- .../v2/deploymentmanager-api.json | 19 +- .../v2/deploymentmanager-gen.go | 26 +- .../v2beta/deploymentmanager-api.json | 7 +- .../v2beta/deploymentmanager-gen.go 
| 6 +- .../api/dialogflow/v2/dialogflow-api.json | 4250 +++++ .../api/dialogflow/v2/dialogflow-gen.go | 12293 ++++++++++++++ .../google.golang.org/api/dlp/v2/dlp-api.json | 5083 ++++++ .../google.golang.org/api/dlp/v2/dlp-gen.go | 13276 ++++++++++++++++ .../api/dlp/v2beta1/dlp-api.json | 6 +- .../api/dlp/v2beta1/dlp-gen.go | 2 +- .../api/dlp/v2beta2/dlp-api.json | 12 +- .../api/dlp/v2beta2/dlp-gen.go | 13 +- .../google.golang.org/api/dns/v1/dns-api.json | 697 +- .../google.golang.org/api/dns/v1/dns-gen.go | 1783 ++- .../api/dns/v1beta2/dns-api.json | 16 +- .../api/dns/v1beta2/dns-gen.go | 66 +- .../api/dns/v2beta1/dns-api.json | 23 +- .../api/dns/v2beta1/dns-gen.go | 69 +- .../api/firestore/v1beta1/firestore-api.json | 4 +- .../api/firestore/v1beta1/firestore-gen.go | 2 +- .../api/gmail/v1/gmail-api.json | 6 +- .../api/gmail/v1/gmail-gen.go | 4 +- .../manufacturers/v1/manufacturers-api.json | 6 +- .../api/manufacturers/v1/manufacturers-gen.go | 2 +- .../google.golang.org/api/ml/v1/ml-api.json | 10 +- vendor/google.golang.org/api/ml/v1/ml-gen.go | 12 +- .../api/oslogin/v1alpha/oslogin-api.json | 4 +- .../api/oslogin/v1alpha/oslogin-gen.go | 2 +- .../api/oslogin/v1beta/oslogin-api.json | 4 +- .../api/oslogin/v1beta/oslogin-gen.go | 2 +- .../v4/pagespeedonline-api.json | 5 +- .../pagespeedonline/v4/pagespeedonline-gen.go | 2 + .../v1/serviceconsumermanagement-api.json | 69 +- .../v1/serviceconsumermanagement-gen.go | 210 +- .../servicecontrol/v1/servicecontrol-api.json | 4 +- .../servicecontrol/v1/servicecontrol-gen.go | 2 + .../v1/servicemanagement-api.json | 47 +- .../v1/servicemanagement-gen.go | 151 +- .../v1beta1/serviceusage-api.json | 47 +- .../serviceusage/v1beta1/serviceusage-gen.go | 132 +- .../api/serviceuser/v1/serviceuser-api.json | 41 +- .../api/serviceuser/v1/serviceuser-gen.go | 124 +- .../api/sheets/v4/sheets-api.json | 90 +- .../api/sheets/v4/sheets-gen.go | 185 +- .../api/slides/v1/slides-api.json | 4 +- .../api/slides/v1/slides-gen.go | 4 +- 
.../api/speech/v1/speech-api.json | 5 +- .../api/speech/v1/speech-gen.go | 2 +- .../api/speech/v1beta1/speech-api.json | 5 +- .../api/speech/v1beta1/speech-gen.go | 2 +- .../api/sqladmin/v1beta4/sqladmin-api.json | 8 +- .../api/sqladmin/v1beta4/sqladmin-gen.go | 9 +- .../api/storage/v1/storage-api.json | 10 +- .../api/storage/v1/storage-gen.go | 15 +- .../v1/storagetransfer-api.json | 2 +- .../api/testing/v1/testing-api.json | 10 +- .../api/testing/v1/testing-gen.go | 13 + .../toolresults/v1beta3/toolresults-api.json | 10 +- .../toolresults/v1beta3/toolresults-gen.go | 8 +- .../api/tpu/v1alpha1/tpu-api.json | 118 +- .../api/tpu/v1alpha1/tpu-gen.go | 447 +- .../api/transport/grpc/go18.go | 2 +- .../api/transport/http/go18.go | 8 +- .../api/vision/v1/vision-api.json | 47 +- .../api/vision/v1/vision-gen.go | 175 +- .../api/vision/v1p1beta1/vision-api.json | 11 +- .../api/vision/v1p1beta1/vision-gen.go | 5 +- .../api/vision/v1p2beta1/vision-api.json | 11 +- .../api/vision/v1p2beta1/vision-gen.go | 5 +- .../api/youtube/v3/youtube-api.json | 22 +- .../api/youtube/v3/youtube-gen.go | 10 + .../api/servicemanagement/v1/resources.pb.go | 185 +- .../servicemanagement/v1/servicemanager.pb.go | 181 +- .../cloud/speech/v1p1beta1/cloud_speech.pb.go | 345 +- .../texttospeech/v1beta1/cloud_tts.pb.go | 122 +- .../v1p1beta1/video_intelligence.pb.go | 1551 ++ .../devtools/cloudtrace/v1/trace.pb.go | 2 + .../googleapis/privacy/dlp/v2/dlp.pb.go | 9624 +++++++++++ .../googleapis/privacy/dlp/v2/storage.pb.go | 1614 ++ .../googleapis/spanner/v1/spanner.pb.go | 6 + .../genproto/googleapis/spanner/v1/type.pb.go | 6 + vendor/gopkg.in/yaml.v2/README.md | 2 - vendor/gopkg.in/yaml.v2/decode.go | 13 +- vendor/gopkg.in/yaml.v2/decode_test.go | 54 + vendor/gopkg.in/yaml.v2/encode.go | 8 +- vendor/gopkg.in/yaml.v2/encode_test.go | 23 +- vendor/gopkg.in/yaml.v2/readerc.go | 20 +- vendor/gopkg.in/yaml.v2/resolve.go | 29 +- vendor/gopkg.in/yaml.v2/scannerc.go | 14 +- vendor/gopkg.in/yaml.v2/sorter.go | 
9 + 1266 files changed, 122191 insertions(+), 28775 deletions(-) create mode 100644 vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-build.bat create mode 100644 vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-install.bat create mode 100644 vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-test.bat create mode 100644 vendor/git.apache.org/thrift.git/build/cmake/FindGradlew.cmake delete mode 100644 vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile.orig create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cl_generator.cc create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.h create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/thrift-t_php_generator.o-a60a38e9 create mode 100755 vendor/git.apache.org/thrift.git/compiler/cpp/test/bincat.sh create mode 100755 vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin_stability_test.sh create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/CMakeLists.txt create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/README.md create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/catch/catch.hpp create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests.cc create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.cc create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.h create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_helpers_tests.cc create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_initialization_tests.cc create mode 100644 vendor/git.apache.org/thrift.git/compiler/cpp/tests/tests_main.cc create mode 100644 
vendor/git.apache.org/thrift.git/contrib/fb303/java/build.properties create mode 100644 vendor/git.apache.org/thrift.git/lib/as3/build.properties create mode 100644 vendor/git.apache.org/thrift.git/lib/cl/Makefile.am create mode 100644 vendor/git.apache.org/thrift.git/lib/cl/README.md create mode 100644 vendor/git.apache.org/thrift.git/lib/cl/READMES/readme-cassandra.lisp create mode 100755 vendor/git.apache.org/thrift.git/lib/cl/ensure-externals.sh create mode 100644 vendor/git.apache.org/thrift.git/lib/cl/load-locally.lisp create mode 100644 vendor/git.apache.org/thrift.git/lib/cl/test/make-test-binary.lisp create mode 100644 vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayHTTPTest.cpp rename vendor/git.apache.org/thrift.git/lib/{go/test/tests/pre_go17.go => cpp/test/OneWayTest.thrift} (50%) create mode 100644 vendor/git.apache.org/thrift.git/lib/cpp/test/RenderedDoubleConstantsTest.cpp create mode 100644 vendor/git.apache.org/thrift.git/lib/dart/test/transport/t_framed_transport_test.dart rename vendor/git.apache.org/thrift.git/lib/delphi/{src/Thrift.Console.pas => test/ConsoleHelper.pas} (96%) create mode 100644 vendor/git.apache.org/thrift.git/lib/erl/test/test_rendered_double_constants.erl rename vendor/git.apache.org/thrift.git/{tutorial/go/src/go17.go => lib/go/test/tests/context.go} (94%) delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/client_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/client_pre_go17.go rename vendor/git.apache.org/thrift.git/lib/go/thrift/{common_test_go17.go => common_test.go} (98%) rename vendor/git.apache.org/thrift.git/lib/go/thrift/{go17.go => context.go} (98%) delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_pre_go17.go delete mode 100644 
vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_pre_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/pre_go17.go create mode 100644 vendor/git.apache.org/thrift.git/lib/java/build.gradle delete mode 100644 vendor/git.apache.org/thrift.git/lib/java/build.xml create mode 100644 vendor/git.apache.org/thrift.git/lib/java/code_quality_tools/findbugs-filter.xml rename vendor/git.apache.org/thrift.git/lib/java/{build.properties => gradle.properties} (55%) rename vendor/git.apache.org/thrift.git/lib/{go/thrift/common_test_pre_go17.go => java/gradle/additionalArtifacts.gradle} (61%) rename vendor/git.apache.org/thrift.git/lib/{go/test/tests/go17.go => java/gradle/cloverCoverage.gradle} (55%) create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/codeQualityChecks.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/environment.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/functionalTests.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/generateTestThrift.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/publishing.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/sourceConfiguration.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/unitTests.gradle create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradle/wrapper/gradle-wrapper.properties create mode 100755 vendor/git.apache.org/thrift.git/lib/java/gradlew create mode 100644 vendor/git.apache.org/thrift.git/lib/java/gradlew.bat rename vendor/git.apache.org/thrift.git/{test/go/src/bin/testclient/go17.go => lib/java/settings.gradle} (90%) create mode 100644 vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestRenderedDoubleConstants.java create mode 100644 
vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactoryStreamedStore.java create mode 100644 vendor/git.apache.org/thrift.git/lib/js/package-lock.json create mode 100644 vendor/git.apache.org/thrift.git/lib/js/test/build.properties create mode 100644 vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.html create mode 100644 vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.js create mode 100644 vendor/git.apache.org/thrift.git/lib/js/test/test-es6.html create mode 100644 vendor/git.apache.org/thrift.git/lib/js/test/test-es6.js create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/.gitignore create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Protocols/ProtocolsOperationsTests.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Thrift.IntegrationTests.csproj create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/.gitignore create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/TCollectionsTests.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/THashSetTests.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolHelperTests.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolTests.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Thrift.Tests.csproj create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TBase64Helper.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolConstants.cs create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolHelper.cs 
create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/build.cmd create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/build.sh create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/runtests.cmd create mode 100644 vendor/git.apache.org/thrift.git/lib/netcore/runtests.sh delete mode 100644 vendor/git.apache.org/thrift.git/lib/nodejs/test/browser_client.js create mode 100644 vendor/git.apache.org/thrift.git/lib/perl/MANIFEST.SKIP create mode 100644 vendor/git.apache.org/thrift.git/lib/perl/tools/FixupDist.pl create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Base/TBase.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/ClassLoader/ThriftClassLoader.php (78%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TApplicationException.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TException.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Exception/TProtocolException.php (78%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Exception/TTransportException.php (79%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Factory/TBinaryProtocolFactory.php (72%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Factory/TCompactProtocolFactory.php (87%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Factory/TJSONProtocolFactory.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Factory/TProtocolFactory.php (85%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Factory/TStringFuncFactory.php (83%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TTransportFactory.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/JSON/BaseContext.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/JSON/ListContext.php (100%) rename 
vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/JSON/LookaheadReader.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/JSON/PairContext.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/SimpleJSON/CollectionMapKeyException.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/SimpleJSON/Context.php (99%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/SimpleJSON/ListContext.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/SimpleJSON/MapContext.php (96%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/SimpleJSON/StructContext.php (93%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocol.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocolAccelerated.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TCompactProtocol.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/TJSONProtocol.php (90%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/TMultiplexedProtocol.php (100%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocol.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/TProtocolDecorator.php (99%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Protocol/TSimpleJSONProtocol.php (98%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Serializer/TBinarySerializer.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Server/TForkingServer.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSSLServerSocket.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServer.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerSocket.php create 
mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerTransport.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSimpleServer.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/StoredMessageProtocol.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/StringFunc/Core.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/StringFunc/Mbstring.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/StringFunc/TStringFunc.php (100%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/TMultiplexedProcessor.php (84%) delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Base/TBase.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TApplicationException.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TException.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TTransportFactory.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TBinaryProtocol.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TBinaryProtocolAccelerated.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TCompactProtocol.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocol.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Serializer/TBinarySerializer.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TForkingServer.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSSLServerSocket.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServer.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerSocket.php delete mode 100644 
vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerTransport.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSimpleServer.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TBufferedTransport.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TCurlClient.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TFramedTransport.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/THttpClient.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TMemoryBuffer.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TPhpStream.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSSLSocket.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocket.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocketPool.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TTransport.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TBufferedTransport.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TCurlClient.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TFramedTransport.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/THttpClient.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TMemoryBuffer.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Transport/TNullTransport.php (78%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TPhpStream.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSSLSocket.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocket.php create mode 100644 
vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocketPool.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TTransport.php rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Type/TConstant.php (91%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Type/TMessageType.php (91%) rename vendor/git.apache.org/thrift.git/lib/php/lib/{Thrift => }/Type/TType.php (68%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Fixtures.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/JsonSerialize/JsonSerializeTest.php rename vendor/git.apache.org/thrift.git/lib/php/test/{Test/Thrift/Protocol/TestBinarySerializer.php => Protocol/BinarySerializerTest.php} (54%) create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolFixtures.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolTest.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolFixtures.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolTest.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Fixtures.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/JsonSerialize/JsonSerializeTest.php delete mode 100755 vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTJSONProtocol.php delete mode 100755 vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTSimpleJSONProtocol.php delete mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/TestValidators.php create mode 100644 vendor/git.apache.org/thrift.git/lib/php/test/Validator/BaseValidatorTest.php rename vendor/git.apache.org/thrift.git/lib/{go/thrift/processor.go => php/test/Validator/ValidatorTest.php} (62%) rename vendor/git.apache.org/thrift.git/lib/{go/thrift/processor_go17.go => 
php/test/Validator/ValidatorTestOop.php} (61%) create mode 100644 vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_server_socket_spec.rb create mode 100644 vendor/git.apache.org/thrift.git/package-lock.json create mode 100644 vendor/git.apache.org/thrift.git/phpcs.xml.dist create mode 100644 vendor/git.apache.org/thrift.git/test/DoubleConstantsTest.thrift create mode 100755 vendor/git.apache.org/thrift.git/test/cl/Makefile.am create mode 100644 vendor/git.apache.org/thrift.git/test/cl/implementation.lisp create mode 100644 vendor/git.apache.org/thrift.git/test/cl/make-test-client.lisp create mode 100644 vendor/git.apache.org/thrift.git/test/cl/make-test-server.lisp create mode 100644 vendor/git.apache.org/thrift.git/test/cl/tests.lisp delete mode 100644 vendor/git.apache.org/thrift.git/test/go/src/bin/stress/go17.go delete mode 100644 vendor/git.apache.org/thrift.git/test/go/src/bin/stress/pre_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/pre_go17.go create mode 100644 vendor/git.apache.org/thrift.git/test/go/src/common/context_test.go delete mode 100644 vendor/git.apache.org/thrift.git/test/go/src/common/go17.go delete mode 100644 vendor/git.apache.org/thrift.git/test/go/src/common/pre_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler_go17.go mode change 100644 => 100755 vendor/git.apache.org/thrift.git/test/haxe/make_all.sh create mode 100644 vendor/git.apache.org/thrift.git/test/netcore/Client/.gitignore rename vendor/git.apache.org/thrift.git/test/netcore/{ThriftTest/ThriftTest.csproj => Client/Client.csproj} (55%) rename vendor/git.apache.org/thrift.git/test/netcore/{ThriftTest => Client}/Program.cs (82%) rename vendor/git.apache.org/thrift.git/test/netcore/{ThriftTest => Client}/Properties/AssemblyInfo.cs (97%) rename vendor/git.apache.org/thrift.git/test/netcore/{ThriftTest => Client}/TestClient.cs (92%) create mode 100644 
vendor/git.apache.org/thrift.git/test/netcore/Server/.gitignore create mode 100644 vendor/git.apache.org/thrift.git/test/netcore/Server/Program.cs create mode 100644 vendor/git.apache.org/thrift.git/test/netcore/Server/Properties/AssemblyInfo.cs create mode 100644 vendor/git.apache.org/thrift.git/test/netcore/Server/Server.csproj rename vendor/git.apache.org/thrift.git/test/netcore/{ThriftTest => Server}/TestServer.cs (89%) delete mode 100644 vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/launchSettings.json rename vendor/git.apache.org/thrift.git/test/php/{TestPsr4.php => TestClassmap.php} (96%) create mode 100644 vendor/git.apache.org/thrift.git/test/py/TestRenderedDoubleConstants.py create mode 100755 vendor/git.apache.org/thrift.git/tutorial/cl/Makefile.am create mode 100644 vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-client.lisp create mode 100644 vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-server.lisp create mode 100644 vendor/git.apache.org/thrift.git/tutorial/cl/shared-implementation.lisp create mode 100644 vendor/git.apache.org/thrift.git/tutorial/cl/thrift-tutorial.asd create mode 100644 vendor/git.apache.org/thrift.git/tutorial/cl/tutorial-implementation.lisp delete mode 100644 vendor/git.apache.org/thrift.git/tutorial/go/src/handler_go17.go delete mode 100644 vendor/git.apache.org/thrift.git/tutorial/go/src/pre_go17.go create mode 100644 vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/.gitignore create mode 100644 vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md create mode 100644 vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go create mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go create mode 100644 vendor/github.com/docker/docker/daemon/exec_linux_test.go create mode 100644 vendor/github.com/docker/docker/image/spec/README.md delete mode 100644 vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go delete 
mode 100644 vendor/github.com/docker/docker/integration-cli/trust_server_test.go create mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/main_test.go create mode 100644 vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go create mode 100644 vendor/github.com/docker/docker/volume/store/db_test.go create mode 100644 vendor/github.com/docker/docker/volume/store/restore_test.go create mode 100644 vendor/github.com/go-openapi/runtime/client/keepalive.go create mode 100644 vendor/github.com/go-openapi/runtime/client/keepalive_test.go create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/todos.common.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/todos.json create mode 100644 vendor/go.opencensus.io/exporter/jaeger/agent.go rename vendor/go.opencensus.io/{examples/trace/jaeger => exporter/jaeger/example}/main.go (100%) create mode 100644 vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/agent.go rename vendor/go.opencensus.io/{examples/stats/prometheus => exporter/prometheus/example}/main.go (94%) rename vendor/go.opencensus.io/{examples/stats/stackdriver => exporter/stackdriver/examples/stats}/main.go (95%) create mode 100644 vendor/go.opencensus.io/exporter/zipkin/example/main.go rename vendor/go.opencensus.io/{plugin/ocgrpc => }/internal/testpb/generate.sh (100%) create mode 100644 vendor/go.opencensus.io/internal/testpb/impl.go rename vendor/go.opencensus.io/{plugin/ocgrpc => }/internal/testpb/test.pb.go (84%) rename vendor/go.opencensus.io/{plugin/ocgrpc => }/internal/testpb/test.proto (90%) create mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/trace_test.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/google/google_test.go create mode 100644 vendor/go.opencensus.io/stats/view/aggtype_string.go create mode 100644 vendor/go.opencensus.io/zpages/rpcz_test.go 
create mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign.go create mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign_test.go create mode 100644 vendor/golang.org/x/net/icmp/diag_test.go delete mode 100644 vendor/golang.org/x/net/icmp/ping_test.go create mode 100644 vendor/golang.org/x/sys/unix/example_test.go create mode 100644 vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-api.json create mode 100644 vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-gen.go create mode 100644 vendor/google.golang.org/api/dialogflow/v2/dialogflow-api.json create mode 100644 vendor/google.golang.org/api/dialogflow/v2/dialogflow-gen.go create mode 100644 vendor/google.golang.org/api/dlp/v2/dlp-api.json create mode 100644 vendor/google.golang.org/api/dlp/v2/dlp-gen.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1/video_intelligence.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/dlp.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/storage.pb.go diff --git a/Gopkg.lock b/Gopkg.lock index 24256029f..5208f6024 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -4,8 +4,7 @@ [[projects]] name = "git.apache.org/thrift.git" packages = ["lib/go/thrift"] - revision = "327ebb6c2b6df8bf075da02ef45a2a034e9b79ba" - version = "0.11.0" + revision = "272470790ad6db791bd6f9db399b2cd2d5879f74" [[projects]] branch = "master" @@ -43,8 +42,8 @@ [[projects]] name = "github.com/asaskevich/govalidator" packages = ["."] - revision = "521b25f4b05fd26bec69d9dedeb8f9c9a83939a8" - version = "v8" + revision = "ccb8e960c48f04d6935e72476ae4a51028f9e22f" + version = "v9" [[projects]] name = "github.com/aws/aws-sdk-go" @@ -85,7 +84,7 @@ branch = "master" name = "github.com/beorn7/perks" packages = ["quantile"] - revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] name = 
"github.com/boltdb/bolt" @@ -96,7 +95,7 @@ branch = "master" name = "github.com/containerd/continuity" packages = ["pathdriver"] - revision = "d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371" + revision = "3e8f2ea4b190484acb976a5b378d373429639a1a" [[projects]] name = "github.com/coreos/go-semver" @@ -154,7 +153,7 @@ "pkg/term", "pkg/term/windows" ] - revision = "241c904e6f5fff020890a7641558e83a209c0bbd" + revision = "29fc64b590badcb1c3f5beff7563ffd31eb58974" [[projects]] name = "github.com/docker/go-connections" @@ -181,7 +180,7 @@ ".", "utils" ] - revision = "49183b946a083774a18b0de0a3184abebc1dec8d" + revision = "0ae900cf56643afe316fcc87323c5845da0531c1" [[projects]] name = "github.com/fnproject/fn_go" @@ -254,13 +253,13 @@ branch = "master" name = "github.com/go-openapi/jsonpointer" packages = ["."] - revision = "779f45308c19820f1a69e9a4cd965f496e0da10f" + revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" [[projects]] branch = "master" name = "github.com/go-openapi/jsonreference" packages = ["."] - revision = "36d33bfe519efae5632669801b180bf1a245da3b" + revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" [[projects]] branch = "master" @@ -281,19 +280,19 @@ "middleware/untyped", "security" ] - revision = "09fac855d8504674b22594470227bd94e5005025" + revision = "62281b694b396a17fe3e4313ee8b0ca2c3cca719" [[projects]] branch = "master" name = "github.com/go-openapi/spec" packages = ["."] - revision = "d8000b5bfbd1147255710505a27c735b6b2ae2ac" + revision = "a3092263d8b39f66ff6fe87b0109668eca1e24ff" [[projects]] branch = "master" name = "github.com/go-openapi/strfmt" packages = ["."] - revision = "6d1a47fad79c81e8cd732889cb80e91123951860" + revision = "6ba31556a6c60db8615afb9d8eddae7aae15eb48" [[projects]] branch = "master" @@ -370,7 +369,7 @@ ".", "oid" ] - revision = "88edab0803230a3898347e77b474f8c1820a1f20" + revision = "614cb7963ff8ee90114d039a0f92dcd6a79292f9" [[projects]] branch = "master" @@ -380,7 +379,7 @@ "jlexer", "jwriter" ] - revision = 
"f594efddfa171111dc4349cd6e78e8f61dc7936f" + revision = "8b799c424f57fa123fc63a99d6383bc6e4c02578" [[projects]] name = "github.com/mattn/go-isatty" @@ -481,7 +480,7 @@ "internal/bitbucket.org/ww/goautoneg", "model" ] - revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0" + revision = "38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a" [[projects]] branch = "master" @@ -492,7 +491,7 @@ "nfs", "xfs" ] - revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349" + revision = "780932d4fbbe0e69b84c34c20f5c8d0981e109ea" [[projects]] name = "github.com/sirupsen/logrus" @@ -526,13 +525,14 @@ "trace", "trace/propagation" ] - revision = "f1af72ab88d638dcc20ea6ecf83c98b59b092559" + revision = "6e3f034057826b530038d93267906ec3c012183f" + version = "v0.6.0" [[projects]] branch = "master" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - revision = "182114d582623c1caa54f73de9c7224e23a48487" + revision = "88942b9c40a4c9d203b82b3731787b672d6e809b" [[projects]] branch = "master" @@ -547,13 +547,13 @@ "lex/httplex", "trace" ] - revision = "ae89d30ce0c63142b652837da33d782e2b0a9b25" + revision = "6078986fec03a1dcc236c34816c71b0e05018fda" [[projects]] branch = "master" name = "golang.org/x/sync" packages = ["semaphore"] - revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" [[projects]] branch = "master" @@ -562,7 +562,7 @@ "unix", "windows" ] - revision = "c28acc882ebcbfbe8ce9f0f14b9ac26ee138dd51" + revision = "13d03a9a82fba647c21a0ef8fba44a795d0f0835" [[projects]] name = "golang.org/x/text" @@ -596,13 +596,13 @@ branch = "master" name = "google.golang.org/api" packages = ["support/bundler"] - revision = "c24aa0e5ed34558ea50c016e4fb92c5e9aa69f2c" + revision = "e4126357c891acdef6dcd7805daa4c6533be6544" [[projects]] branch = "master" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - revision = "df60624c1e9b9d2973e889c7a1cff73155da81c4" + revision = "ab0870e398d5dd054b868c0db1481ab029b9a9f2" [[projects]] name = 
"google.golang.org/grpc" @@ -652,12 +652,12 @@ [[projects]] name = "gopkg.in/yaml.v2" packages = ["."] - revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" - version = "v2.1.1" + revision = "86f5ed62f8a0ee96bd888d2efdfd6d4fb100a4eb" + version = "v2.2.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "1da8f3b4885673e5d2b431cf72e0a089da506c4ec6da226d6faaa25aeeff958c" + inputs-digest = "2eb445aa73b99916ee56e155b5461840a296d39be4869557099ff25bb68bec7b" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index cc83e4b46..e2d43db2b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -72,4 +72,8 @@ ignored = ["github.com/fnproject/fn/cli"] [[constraint]] name = "go.opencensus.io" - revision = "f1af72ab88d638dcc20ea6ecf83c98b59b092559" + version = "0.6.0" + +[[override]] + name = "git.apache.org/thrift.git" + revision = "272470790ad6db791bd6f9db399b2cd2d5879f74" diff --git a/api/agent/agent.go b/api/agent/agent.go index 020d9108c..e29e8ffce 100644 --- a/api/agent/agent.go +++ b/api/agent/agent.go @@ -950,7 +950,7 @@ func init() { "docker container stats for "+key, []tag.Key{appKey, pathKey}, dockerStatsDist, - view.DistributionAggregation{}, + view.Distribution(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) diff --git a/api/agent/drivers/docker/docker_client.go b/api/agent/drivers/docker/docker_client.go index 88f5cd8e3..5d0a22734 100644 --- a/api/agent/drivers/docker/docker_client.go +++ b/api/agent/drivers/docker/docker_client.go @@ -127,7 +127,7 @@ func init() { "number of times we've retried docker API upon failure", []tag.Key{appKey, pathKey}, dockerRetriesMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -147,7 +147,7 @@ func init() { "number of times we've timed out calling docker API", []tag.Key{appKey, pathKey}, dockerTimeoutMeasure, - view.CountAggregation{}, + view.Count(), ) if err != nil { logrus.Fatalf("cannot create view: %v", 
err) @@ -167,7 +167,7 @@ func init() { "number of unrecoverable errors from docker API", []tag.Key{appKey, pathKey}, dockerErrorMeasure, - view.CountAggregation{}, + view.Count(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -187,7 +187,7 @@ func init() { "number of docker container oom", []tag.Key{appKey, pathKey}, dockerOOMMeasure, - view.CountAggregation{}, + view.Count(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) diff --git a/api/agent/state_trackers.go b/api/agent/state_trackers.go index 1722aef6b..695604a78 100644 --- a/api/agent/state_trackers.go +++ b/api/agent/state_trackers.go @@ -180,7 +180,7 @@ func init() { "containers in state "+key, []tag.Key{appKey, pathKey}, measure, - view.CountAggregation{}, + view.Count(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -203,7 +203,7 @@ func init() { "time spent in container state "+key, []tag.Key{appKey, pathKey}, measure, - view.DistributionAggregation{}, + view.Distribution(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) diff --git a/api/agent/stats.go b/api/agent/stats.go index 2bfe13d84..720ffb670 100644 --- a/api/agent/stats.go +++ b/api/agent/stats.go @@ -105,7 +105,7 @@ func init() { "calls currently queued to agent", []tag.Key{appKey, pathKey}, queuedMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -125,7 +125,7 @@ func init() { "calls created in agent", []tag.Key{appKey, pathKey}, callsMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -145,7 +145,7 @@ func init() { "calls currently running in agent", []tag.Key{appKey, pathKey}, runningMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -165,7 +165,7 @@ func init() { "calls completed in agent", []tag.Key{appKey, pathKey}, completedMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != 
nil { logrus.Fatalf("cannot create view: %v", err) @@ -185,7 +185,7 @@ func init() { "calls failed in agent", []tag.Key{appKey, pathKey}, failedMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -205,7 +205,7 @@ func init() { "calls timed out in agent", []tag.Key{appKey, pathKey}, timedoutMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -225,7 +225,7 @@ func init() { "calls errored in agent", []tag.Key{appKey, pathKey}, errorsMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -245,7 +245,7 @@ func init() { "calls where server was too busy in agent", []tag.Key{appKey, pathKey}, serverBusyMeasure, - view.SumAggregation{}, + view.Sum(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) diff --git a/api/logs/s3/s3.go b/api/logs/s3/s3.go index 2fe660c45..2bfc6076b 100644 --- a/api/logs/s3/s3.go +++ b/api/logs/s3/s3.go @@ -201,7 +201,7 @@ func init() { "uploaded log size", []tag.Key{appKey, pathKey}, uploadSizeMeasure, - view.DistributionAggregation{}, + view.Distribution(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) @@ -221,7 +221,7 @@ func init() { "downloaded log size", []tag.Key{appKey, pathKey}, downloadSizeMeasure, - view.DistributionAggregation{}, + view.Distribution(), ) if err != nil { logrus.Fatalf("cannot create view: %v", err) diff --git a/vendor/git.apache.org/thrift.git/.gitignore b/vendor/git.apache.org/thrift.git/.gitignore index 8f1879e63..66193db3e 100644 --- a/vendor/git.apache.org/thrift.git/.gitignore +++ b/vendor/git.apache.org/thrift.git/.gitignore @@ -24,8 +24,14 @@ tags .*project +.classpath +.settings +.checkstyle junit*.properties .idea +*.iml +*.ipr +*.iws gen-* Makefile Makefile.in @@ -37,13 +43,13 @@ node_modules compile test-driver erl_crash.dump -package-lock.json project.lock.json .sonar .DS_Store .svn .vagrant 
+.vscode /contrib/.vagrant/ /aclocal/libtool.m4 @@ -94,6 +100,9 @@ project.lock.json /contrib/fb303/py/fb303/ttypes.py /depcomp /install-sh +/lib/cl/backport-update.zip +/lib/cl/lib +/lib/cl/run-tests /lib/cpp/Debug/ /lib/cpp/Debug-mt/ /lib/cpp/Release/ @@ -220,11 +229,14 @@ project.lock.json /lib/erl/test/*.hrl /lib/erl/test/Thrift_omit_without.thrift /lib/haxe/test/bin +/lib/haxe/test/data.tmp /lib/hs/dist +/lib/java/.gradle +/lib/java/android/.gradle /lib/java/build +/lib/java/target /lib/js/dist /lib/js/doc -/lib/js/package-lock.json /lib/js/test/build /lib/netcore/**/.vs /lib/netcore/**/bin @@ -289,11 +301,15 @@ project.lock.json /ltmain.sh /missing /node_modules/ +/vendor/ +/composer.lock /stamp-h1 /test/features/results.json /test/results.json /test/c_glib/test_client /test/c_glib/test_server +/test/cl/TestServer +/test/cl/TestClient /test/cpp/StressTest /test/cpp/StressTestNonBlocking /test/cpp/TestClient @@ -335,6 +351,8 @@ project.lock.json /test/rs/target/ /test/rs/*.iml /test/rs/**/*.iml +/tutorial/cl/TutorialClient +/tutorial/cl/TutorialServer /tutorial/cpp/TutorialClient /tutorial/cpp/TutorialServer /tutorial/c_glib/tutorial_client @@ -382,3 +400,7 @@ project.lock.json /tutorial/rs/target /tutorial/rs/Cargo.lock /ylwrap +/lib/cl/quicklisp.lisp +/lib/cl/externals/ +/lib/cl/run-tests +/lib/cl/quicklisp/ diff --git a/vendor/git.apache.org/thrift.git/.travis.yml b/vendor/git.apache.org/thrift.git/.travis.yml index 9a94e3649..b3a6a2672 100644 --- a/vendor/git.apache.org/thrift.git/.travis.yml +++ b/vendor/git.apache.org/thrift.git/.travis.yml @@ -35,7 +35,6 @@ install: - if [[ `uname` == "Linux" ]]; then build/docker/refresh.sh; fi stages: -# - osx # up front for now (for testing) - docker # docker images - thrift # thrift build jobs @@ -43,8 +42,8 @@ env: global: - SCRIPT="cmake.sh" - BUILD_ARG="" - - BUILD_ENV="-e CC=clang -e CXX=clang++" - - DISTRO=ubuntu-xenial + - BUILD_ENV="-e CC=gcc -e CXX=g++ -e THRIFT_CROSSTEST_CONCURRENCY=4" + - 
DISTRO=ubuntu-artful - BUILD_LIBS="CPP C_GLIB HASKELL JAVA PYTHON TESTING TUTORIALS" # only meaningful for CMake builds - TRAVIS_BUILD_STAGE=test # DOCKER_REPO (this works for all builds as a source for docker images - you can override for fork builds in your Travis settings) @@ -54,20 +53,9 @@ env: jobs: include: - # ------------------------- phase: osx -------------------------- - # - stage: osx - # os: osx - # osx_image: xcode9 - # script: build/docker/scripts/autotools.sh - # ========================= stage: docker ========================= - stage: docker script: true - env: - - JOB="Docker Build ubuntu-trusty 14.04" - - DISTRO=ubuntu-trusty - - TRAVIS_BUILD_STAGE=docker - - script: true env: - JOB="Docker Build ubuntu-xenial 16.04" - DISTRO=ubuntu-xenial @@ -87,8 +75,6 @@ jobs: env: - JOB="Cross Language Tests" - SCRIPT="cross-test.sh" - - BUILD_ARG="" - - BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" # fork based PR builds cannot exceed 50 minutes per job - stage: thrift @@ -98,7 +84,6 @@ jobs: - JOB="Cross Language Tests (Binary Protocol)" - SCRIPT="cross-test.sh" - BUILD_ARG="-'(binary)'" - - BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" - stage: thrift script: build/docker/run.sh @@ -107,7 +92,6 @@ jobs: - JOB="Cross Language Tests (Header, JSON Protocols)" - SCRIPT="cross-test.sh" - BUILD_ARG="-'(header|json)'" - - BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" - stage: thrift script: build/docker/run.sh @@ -116,7 +100,6 @@ jobs: - JOB="Cross Language Tests (Compact and Multiplexed Protocols)" - SCRIPT="cross-test.sh" - BUILD_ARG="-'(compact|multiplexed)'" - - BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" # ------------------------- phase: sca -------------------------- # QA jobs for code analytics and metrics @@ -125,7 +108,6 @@ jobs: env: - JOB="Static Code Analysis" - SCRIPT="sca.sh" - - DISTRO=ubuntu-artful # C and C++ undefined behavior. 
# A binary crashes if undefined behavior occurs and produces a stack trace. @@ -134,13 +116,12 @@ jobs: env: - JOB="UBSan" - SCRIPT="ubsan.sh" - - DISTRO=ubuntu-artful - BUILD_ARG="--without-python --without-py3" # ------------------------- phase: cmake ------------------------ - script: build/docker/run.sh env: - - JOB="CMake (Ubuntu Xenial)" + - JOB="CMake" # C++ specific options: compiler plug-in, threading model - script: build/docker/run.sh @@ -149,7 +130,7 @@ jobs: - SCRIPT="cmake.sh" - BUILD_LIBS="CPP TESTING TUTORIALS" - BUILD_ARG="-DCMAKE_CXX_STANDARD=98 -DCMAKE_CXX_STANDARD_REQUIRED=ON -DCMAKE_CXX_EXTENSIONS=OFF --DWITH_BOOSTTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" - - BUILD_ENV="" + - BUILD_ENV="-e CC=clang -e CXX=clang++" - script: build/docker/run.sh env: @@ -166,31 +147,29 @@ jobs: - JOB="Autotools (Ubuntu Artful)" - DISTRO=ubuntu-artful - SCRIPT="autotools.sh" - - BUILD_ENV="-e CC=gcc -e CXX=g++" - script: build/docker/run.sh env: - JOB="Autotools (Ubuntu Xenial)" - DISTRO=ubuntu-xenial - SCRIPT="autotools.sh" - - BUILD_ENV="-e CC=gcc -e CXX=g++" - - - script: build/docker/run.sh - env: - - JOB="Autotools (Ubuntu Trusty)" - - DISTRO=ubuntu-trusty - - SCRIPT="autotools.sh" - - BUILD_ENV="-e CC=gcc -e CXX=g++" # ------------------------- phase: dist ------------------------- - script: build/docker/run.sh env: - JOB="make dist" + - DISTRO=ubuntu-artful - SCRIPT="make-dist.sh" - - BUILD_ENV="-e CC=gcc -e CXX=g++" - script: build/docker/run.sh env: - JOB="Debian Packages" + - DISTRO=ubuntu-artful - SCRIPT="dpkg.sh" - - BUILD_ENV="-e CC=gcc -e CXX=g++" + + ### ------------------------- phase: osx ------------------------- + # disabled due to the time delays it imposes on build jobs + # - os: osx + # osx_image: xcode9 + # script: build/docker/scripts/autotools.sh + diff --git a/vendor/git.apache.org/thrift.git/CHANGES b/vendor/git.apache.org/thrift.git/CHANGES index 4009f264c..7b674d6f6 100644 --- 
a/vendor/git.apache.org/thrift.git/CHANGES +++ b/vendor/git.apache.org/thrift.git/CHANGES @@ -1,295 +1,5 @@ Apache Thrift Changelog -Thrift 0.11.0 --------------------------------------------------------------------------------- -## Sub-task - * [THRIFT-2733] - Erlang coding standards - * [THRIFT-2740] - Perl coding standards - * [THRIFT-3610] - Streamline exception handling in Python server handler - * [THRIFT-3686] - Java processor should report internal error on uncaught exception - * [THRIFT-4049] - Skip() should throw TProtocolException.INVALID_DATA on unknown data types - * [THRIFT-4053] - Skip() should throw TProtocolException.INVALID_DATA on unknown data types - * [THRIFT-4136] - Align is_binary() method with is_string() to simplify those checks - * [THRIFT-4137] - Fix remaining undefined behavior invalid vptr casts in Thrift Compiler - * [THRIFT-4138] - Fix remaining undefined behavior invalid vptr casts in C++ library - * [THRIFT-4296] - Fix Ubuntu Xenial build environment for the python language - * [THRIFT-4298] - Fix Ubuntu Xenial build environment for the go 1.6 language - * [THRIFT-4299] - Fix Ubuntu Xenial build environment for the D language - * [THRIFT-4300] - Fix make cross in Ubuntu Xenial docker environment, once all language support issues are fixed - * [THRIFT-4302] - Fix Ubuntu Xenial make cross testing for lua and php7 - * [THRIFT-4398] - Update EXTRA_DIST for "make dist" - -## Bug - * [THRIFT-381] - Fail fast if configure detects C++ problems - * [THRIFT-1677] - MinGW support broken - * [THRIFT-1805] - Thrift should not swallow ALL exceptions - * [THRIFT-2026] - Fix TCompactProtocol 64 bit builds - * [THRIFT-2642] - Recursive structs don't work in python - * [THRIFT-2889] - stable release 0.9.2, erlang tutorial broken - * [THRIFT-2913] - Ruby Server Thrift::ThreadPoolServer should serve inside a thread - * [THRIFT-2998] - Node.js: Missing header from http request - * [THRIFT-3000] - .NET implementation has trouble with mixed IP modes - * 
[THRIFT-3281] - Travis CI build passed but the log says BUILD FAILED - * [THRIFT-3358] - Makefile:1362: *** missing separator. Stop. - * [THRIFT-3600] - Make TTwisted server send exception on unexpected handler error - * [THRIFT-3602] - Make Tornado server send exception on unexpected handler error - * [THRIFT-3657] - D TFileWriterTransport close should use non-priority send - * [THRIFT-3700] - Go Map has wrong default value when optional - * [THRIFT-3703] - Unions Field Count Does Not Consider Map/Set/List Fields - * [THRIFT-3730] - server log error twice - * [THRIFT-3778] - go client can not pass method parameter to server of other language if no field_id is given - * [THRIFT-3784] - thrift-maven-plugin generates invalid include directories for IDL in dependency JARs - * [THRIFT-3801] - Node Thrift client throws exception with multiplexer and responses that are bigger than a single buffer - * [THRIFT-3821] - TMemoryBuffer buffer may overflow when resizing - * [THRIFT-3832] - Thrift version 0.9.3 example on Windows, Visual Studio, linking errors during compiling - * [THRIFT-3847] - thrift/config.h includes a #define for VERSION which will likely conflict with existing user environment or code - * [THRIFT-3873] - Fix various build warnings when using Visual Studio - * [THRIFT-3891] - TNonblockingServer configured with more than one IO threads does not always return from serve() upon stop() - * [THRIFT-3892] - Thrift uses TLS SNI extension provided by OpenSSL library. Older version of OpenSSL(< 0.9.8f) may create problem because they do not support 'SSL_set_tlsext_host_name()'. 
- * [THRIFT-3895] - Build fails using Java 1.8 with Ant < 1.9 - * [THRIFT-3896] - map data with number string key cannot access that deserialized by php extension - * [THRIFT-3938] - Python TNonblockingServer does not work with SSL - * [THRIFT-3944] - TSSLSocket has dead code in checkHandshake - * [THRIFT-3946] - Java 1.5 compatibility broken for binary fields (java5 option) - * [THRIFT-3960] - Inherited services in Lua generator are not named correctly - * [THRIFT-3962] - Ant build.xml broken on Windows for Java library - * [THRIFT-3963] - Thrift.cabal filename does not match module name - * [THRIFT-3967] - gobject/gparam.h:166:33: warning: enumerator value for ‘G_PARAM_DEPRECATED’ is not an integer constant expression - * [THRIFT-3968] - Deserializing empty string/binary fields - * [THRIFT-3974] - Using clang-3.8 and ThreadSanitizer on the concurrency_test claims bad PThread behavior - * [THRIFT-3984] - PHP7 extension causes segfault - * [THRIFT-4008] - broken ci due to upstream dependency versioning break - * [THRIFT-4009] - Use @implementer instead of implements in TTwisted.py - * [THRIFT-4010] - Q.fcall messing up with *this* pointer inside called function - * [THRIFT-4011] - Sets of Thrift structs generate Go code that can't be serialized to JSON - * [THRIFT-4012] - Python Twisted implementation uses implements, not compatible with Py3 - * [THRIFT-4014] - align C# meta data in AssemblyInfo.cs - * [THRIFT-4015] - Fix wrongly spelled "Thirft"s - * [THRIFT-4016] - testInsanity() impl does not conform to test spec in ThriftTest.thrift - * [THRIFT-4023] - Skip unexpected field types on read/write - * [THRIFT-4024] - Skip() should throw on unknown data types - * [THRIFT-4026] - TSSLSocket doesn't work with Python < 2.7.9 - * [THRIFT-4029] - Accelerated protocols do not build from thrift-py 0.10.0 on PyPI - * [THRIFT-4031] - Go plugin generates invalid code for lists of typedef'ed built-in types - * [THRIFT-4033] - Default build WITH_PLUGIN=ON for all builds results 
in packaging errors - * [THRIFT-4034] - CMake doesn't work to build compiler on MacOS - * [THRIFT-4036] - Add .NET Core environment/build support to the docker image - * [THRIFT-4038] - socket check: checking an unsigned number against >= 0 never fails - * [THRIFT-4042] - ExtractionError when using accelerated thrift in a multiprocess test - * [THRIFT-4043] - thrift perl debian package is placing files in the wrong place - * [THRIFT-4044] - Build job 17 failing on every pull request; hspec core (haskell) 2.4 issue - * [THRIFT-4046] - MinGW with gcc 6.2 does not compile on Windows - * [THRIFT-4060] - Thrift printTo ostream overload mechanism breaks down when types are nested - * [THRIFT-4062] - Remove debug print from TServiceClient - * [THRIFT-4065] - Document Perl ForkingServer signal restriction imposed by THRIFT-3848 and remove unnecessary code - * [THRIFT-4068] - A code comment in Java ServerSocket is wrong around accept() - * [THRIFT-4073] - enum files are still being generated with unused imports - * [THRIFT-4076] - Appveyor builds failing because ant 1.9.8 was removed from apache servers - * [THRIFT-4077] - AI_ADDRCONFIG redefined after recent change to PlatformSocket header - * [THRIFT-4079] - Generated perl code that returns structures from included thrift files is missing a necessary use clause - * [THRIFT-4087] - Spurious exception destroying TThreadedServer because of incorrect join() call - * [THRIFT-4102] - TBufferedTransport performance issue since 0.10.0 - * [THRIFT-4106] - concurrency_test fails randomly - * [THRIFT-4108] - c_glib thrift ssl has multiple bugs and deprecated functions - * [THRIFT-4109] - Configure Script uses string comparison for versions - * [THRIFT-4129] - C++ TNonblockingServer fd leak when failing to dispatch new connections - * [THRIFT-4131] - Javascript with WebSocket handles oneway methods wrong - * [THRIFT-4134] - Fix remaining undefined behavior invalid vptr casts - * [THRIFT-4140] - Use of non-thread-safe function 
gmtime() - * [THRIFT-4141] - Installation of haxe in docker files refers to a redirect link and fails - * [THRIFT-4147] - Rust: protocol should accept transports with non-static lifetime - * [THRIFT-4148] - [maven-thrift-plugin] compile error while import a thrift in dependency jar file. - * [THRIFT-4149] - System.out pollutes log files - * [THRIFT-4154] - PHP close() of a TSocket needs to close any type of socket - * [THRIFT-4158] - minor issue in README-MSYS2.md - * [THRIFT-4159] - Building tests fails on MSYS2 (MinGW64) due to a (small?) linker error - * [THRIFT-4160] - TNonblocking server fix use of closed/freed connections - * [THRIFT-4161] - TNonBlocking server using uninitialized event in error paths - * [THRIFT-4162] - TNonBlocking handling of TSockets in error state is incorrect after fd is closed - * [THRIFT-4164] - Core in TSSLSocket cleanupOpenSSL when destroying a mutex used by openssl - * [THRIFT-4165] - C++ build has many warnings under c++03 due to recent changes, cmake needs better platform-independent language level control - * [THRIFT-4166] - Recent fix to remove boost::lexical_cast usage broke VS2010 - * [THRIFT-4167] - Missing compile flag - * [THRIFT-4170] - Support lua 5.1 or earlier properly for object length determination - * [THRIFT-4172] - node.js tutorial client does not import assert, connection issues are not handled properly - * [THRIFT-4177] - Java compiler produces deep copy constructor that could make shallow copy instead - * [THRIFT-4184] - Building on Appveyor: invalid escape sequence \L - * [THRIFT-4185] - fb303 counter encoding fix - * [THRIFT-4189] - Framed/buffered transport Dispose() does not dispose the nested transport - * [THRIFT-4193] - Lower the default maxReadBufferBytes for non-blocking servers - * [THRIFT-4195] - Compilation to GO produces broken code - * [THRIFT-4196] - Cannot generate recursive Rust types - * [THRIFT-4204] - typo in compact spec - * [THRIFT-4206] - Strings in container fields are not decoded 
properly with py:dynamic and py:utf8strings - * [THRIFT-4208] - C# NamedPipesServer not really working in some scenarios - * [THRIFT-4211] - Fix GError glib management under Thrift - * [THRIFT-4212] - c_glib flush tries to close SSL even if socket is invalid - * [THRIFT-4213] - Travis build fails at curl -sSL https://www.npmjs.com/install.sh | sh - * [THRIFT-4215] - Golang TTransportFactory Pattern Squelches Errors - * [THRIFT-4216] - Golang Http Clients Do Not Respect User Options - * [THRIFT-4218] - Set TCP_NODELAY for PHP client socket - * [THRIFT-4219] - Golang HTTP clients created with Nil buffer - * [THRIFT-4231] - TJSONProtocol throws unexpected non-Thrift-exception on null strings - * [THRIFT-4232] - ./configure does bad ant version check - * [THRIFT-4234] - Travis build fails cross language tests with "Unsupported security protocol type" - * [THRIFT-4237] - Go TServerSocket Race Conditions - * [THRIFT-4240] - Go TSimpleServer does not close properly - * [THRIFT-4243] - Go TSimpleServer race on wait in Stop() method - * [THRIFT-4245] - Golang TFramedTransport's writeBuffer increases if writes to transport failed - * [THRIFT-4246] - Sequence number mismatch on multiplexed clients - * [THRIFT-4247] - Compile fails with openssl 1.1 - * [THRIFT-4248] - Compile fails - strncpy, memcmp, memset not declared in src/thrift/transport/TSSLSocket.cpp - * [THRIFT-4251] - Java Epoll Selector Bug - * [THRIFT-4257] - Typescript async callbacks do not provide the correct types - * [THRIFT-4258] - Boost/std thread wrapping faultiness - * [THRIFT-4260] - Go context generation issue. 
Context is parameter in Interface not in implementation - * [THRIFT-4261] - Go context generation issue: breaking change in generated code regarding thrift.TProcessorFunction interface - * [THRIFT-4262] - Invalid binding to InterlockedCompareExchange64() with 64-bit targets - * [THRIFT-4263] - Fix use after free bug for thrown exceptions - * [THRIFT-4266] - Erlang library throws during skipping fields of composite type (maps, lists, structs, sets) - * [THRIFT-4268] - Erlang library emits debugging output in transport layer - * [THRIFT-4273] - erlang:now/0: Deprecated BIF. - * [THRIFT-4274] - Python feature tests for SSL/TLS failing - * [THRIFT-4279] - Wrong path in include directive in generated Thrift sources - * [THRIFT-4283] - TNamedPipeServer race condition in interrupt - * [THRIFT-4284] - File contains a NBSP: lib/nodejs/lib/thrift/web_server.js - * [THRIFT-4290] - C# nullable option generates invalid code for non-required enum field with default value - * [THRIFT-4292] - TimerManager::remove() is not implemented - * [THRIFT-4307] - Make ssl-open timeout effective in golang client - * [THRIFT-4312] - Erlang client cannot connect to Python server: exception error: econnrefused - * [THRIFT-4313] - Program code of the Erlang tutorial files contain syntax errors - * [THRIFT-4316] - TByteBuffer.java will read too much data if a previous read returns fewer bytes than requested - * [THRIFT-4319] - command line switch for "evhttp" incorrectly resolved to anon pipes - * [THRIFT-4323] - range check errors or NPE in edge cases - * [THRIFT-4324] - field names can conflict with local vars in generated code - * [THRIFT-4328] - Travis CI builds are timing out (job 1) and haxe builds are failing since 9/11 - * [THRIFT-4329] - c_glib Doesn't have a multiplexed processor - * [THRIFT-4331] - C++: TSSLSockets bug in handling huge messages, bug in handling polling - * [THRIFT-4332] - Binary protocol has memory leaks - * [THRIFT-4334] - Perl indentation incorrect when defaulting 
field attribute to a struct - * [THRIFT-4339] - Thrift Framed Transport in Erlang crashes server when client disconnects - * [THRIFT-4340] - Erlang fix a crash on client close - * [THRIFT-4355] - Javascript indentation incorrect when defaulting field attribute to a struct - * [THRIFT-4356] - thrift_protocol call Transport cause Segmentation fault - * [THRIFT-4359] - Haxe compiler looks like it is producing incorrect code for map or set key that is binary type - * [THRIFT-4362] - Missing size-check can lead to huge memory allocation - * [THRIFT-4364] - Website contributing guide erroneously recommends submitting patches in JIRA - * [THRIFT-4365] - Perl generated code uses indirect object syntax, which occasionally causes compilation errors. - * [THRIFT-4367] - python TProcessor.process is missing "self" - * [THRIFT-4370] - Ubuntu Artful cppcheck and flake8 are more stringent and causing SCA build job failures - * [THRIFT-4372] - Pipe write operations across a network are limited to 65,535 bytes per write. - * [THRIFT-4374] - cannot load thrift_protocol due to undefined symbol: _ZTVN10__cxxabiv120__si_class_type_infoE - * [THRIFT-4376] - Coverity high impact issue resolution - * [THRIFT-4377] - haxe. 
socket handles leak in TSimpleServer - * [THRIFT-4381] - Wrong isset bitfield value after transmission - * [THRIFT-4385] - Go remote client -u flag is broken - * [THRIFT-4392] - compiler/..../plugin.thrift structs mis-ordered blows up ocaml generator - * [THRIFT-4395] - Unable to build in the ubuntu-xenial docker image: clap 2.28 requires Rust 1.20 - * [THRIFT-4396] - inconsistent (or plain wrong) version numbers in master/trunk - -## Documentation - * [THRIFT-4157] - outdated readme about Haxe installation on Linux - -## Improvement - * [THRIFT-105] - make a thrift_spec for a structures with negative tags - * [THRIFT-281] - Cocoa library code needs comments, badly - * [THRIFT-775] - performance improvements for Perl - * [THRIFT-2221] - Generate c++ code with std::shared_ptr instead of boost::shared_ptr. - * [THRIFT-2364] - OCaml: Use Oasis exclusively for build process - * [THRIFT-2504] - TMultiplexedProcessor should allow registering default processor called if no service name is present - * [THRIFT-3207] - Enable build with OpenSSL 1.1.0 series - * [THRIFT-3272] - Perl SSL Authentication Support - * [THRIFT-3357] - Generate EnumSet/EnumMap where elements/keys are enums - * [THRIFT-3369] - Implement SSL/TLS support on C with c_glib - * [THRIFT-3467] - Go Maps for Thrift Sets Should Have Values of Type struct{} - * [THRIFT-3580] - THeader for Haskell - * [THRIFT-3627] - Missing basic code style consistency of JavaScript. 
- * [THRIFT-3706] - There's no support for Multiplexed protocol on c_glib library - * [THRIFT-3766] - Add getUnderlyingTransport() to TZlibTransport - * [THRIFT-3776] - Go code from multiple thrift files with the same namespace - * [THRIFT-3823] - Escape documentation while generating non escaped documetation - * [THRIFT-3854] - allow users to clear read buffers - * [THRIFT-3859] - Unix Domain Socket Support in Objective-C - * [THRIFT-3921] - C++ code should print enums as strings - * [THRIFT-3926] - There should be an error emitted when http status code is not 200 - * [THRIFT-4007] - Micro-optimization of TTransport.py - * [THRIFT-4040] - Add real cause of TNonblockingServerSocket error to exception - * [THRIFT-4064] - Update node library dependencies - * [THRIFT-4069] - All perl packages should have proper namespace, version syntax, and use proper thrift exceptions - * [THRIFT-4071] - Consolidate the Travis CI jobs where possible to put less stress on the Apache Foundation's allocation of CI build slaves - * [THRIFT-4072] - Add the possibility to send custom headers in TCurlClient - * [THRIFT-4075] - Better MinGW support for headers-only boost (without thread library) - * [THRIFT-4081] - Provide a MinGW 64-bit Appveyor CI build for better pull request validation - * [THRIFT-4084] - Improve SSL security in thrift by adding a make cross client that checks to make sure SSLv3 protocol cannot be negotiated - * [THRIFT-4095] - Add multiplexed protocol to Travis CI for make cross - * [THRIFT-4099] - Auto-derive Hash for generated Rust structs - * [THRIFT-4110] - The debian build files do not produce a "-dbg" package for debug symbols of libthrift0 - * [THRIFT-4114] - Space after '///' in doc comments - * [THRIFT-4126] - Validate objects in php extension - * [THRIFT-4130] - Ensure Apache Http connection is released back to pool after use - * [THRIFT-4151] - Thrift Mutex Contention Profiling (pthreads) should be disabled by default - * [THRIFT-4176] - Implement a threaded 
and threadpool server type for Rust - * [THRIFT-4183] - Named pipe client blocks forever on Open() when there is no server at the other end - * [THRIFT-4190] - improve C# TThreadPoolServer defaults - * [THRIFT-4197] - Implement transparent gzip compression for HTTP transport - * [THRIFT-4198] - Ruby should log Thrift internal errors to global logger - * [THRIFT-4203] - thrift server stop gracefully - * [THRIFT-4205] - c_glib is not linking against glib + gobject - * [THRIFT-4209] - warning CS0414 in T[TLS]ServerSocket.cs - * [THRIFT-4210] - include Thrift.45.csproj into CI runs - * [THRIFT-4217] - HttpClient should support gzip and deflate - * [THRIFT-4222] - Support Unix Domain Sockets in Golang TServerSocket - * [THRIFT-4233] - Make THsHaServer.invoker available (get method only) in inherited classes - * [THRIFT-4236] - Support context in go generated code. - * [THRIFT-4238] - JSON generator: make annotation-aware - * [THRIFT-4269] - Don't append '.' to Erlang namespace if it ends in '_'. - * [THRIFT-4270] - Generate Erlang mapping functions for const maps and lists - * [THRIFT-4275] - Add support for zope.interface only, apart from twisted support. 
- * [THRIFT-4285] - Pull generated send/recv into library to allow behaviour to be customised - * [THRIFT-4287] - Add c++ compiler "no_skeleton" flag option - * [THRIFT-4288] - Implement logging levels properly for node.js - * [THRIFT-4295] - Refresh the Docker image file suite for Ubuntu, Debian, and CentOS - * [THRIFT-4305] - Emit ddoc for generated items - * [THRIFT-4306] - Thrift imports not replicated to D service output - * [THRIFT-4315] - Add default message for TApplicationException - * [THRIFT-4318] - Delphi performance improvements - * [THRIFT-4325] - Simplify automake cross compilation by relying on one global THRIFT compiler path - * [THRIFT-4327] - Improve TimerManager API to allow removing specific task - * [THRIFT-4330] - Allow unused crates in Rust files - * [THRIFT-4333] - Erlang tutorial examples are using a different port (9999) - * [THRIFT-4343] - Change CI builds to use node.js 8.x LTS once available - * [THRIFT-4345] - Create a docker build environment that uses the minimum supported language levels - * [THRIFT-4346] - Allow Zlib transport factory to wrap other transports - * [THRIFT-4348] - Perl HTTP Client custom HTTP headers - * [THRIFT-4350] - Update netcore build for dotnet 2.0 sdk and make cross validation - * [THRIFT-4351] - Use Travis CI Build Stages to optimize the CI build - * [THRIFT-4353] - cannot read via thrift_protocol at server side - * [THRIFT-4378] - add set stopTimeoutUnit method to TThreadPoolServer - -## New Feature - * [THRIFT-750] - C++ Compiler Virtual Function Option - * [THRIFT-2945] - Implement support for Rust language - * [THRIFT-3857] - thrift js:node complier support an object as parameter not an instance of struct - * [THRIFT-3933] - Port official C# .NET library for Thrift to C# .NET Core libary - * [THRIFT-4039] - Update of Apache Thrift .Net Core lib - * [THRIFT-4113] - Provide a buffer transport for reading/writing in memory byte stream - -## Question - * [THRIFT-2956] - autoconf - possibly undefined macro - 
AC_PROG_BISON - * [THRIFT-4223] - Add support to the isServing() method for the C++ library - -## Task - * [THRIFT-3622] - Fix deprecated uses of std::auto_ptr - * [THRIFT-4028] - Please remove System.out.format from the source code - * [THRIFT-4186] - Build and test rust client in Travis - -## Test - * [THRIFT-4264] - PHP - Support both shared & static linking of sockets library - -## Wish - * [THRIFT-4344] - Define and maintain the minimum language level for all languages in one place - - Thrift 0.10.0 -------------------------------------------------------------------------------- ## Bug diff --git a/vendor/git.apache.org/thrift.git/LANGUAGES.md b/vendor/git.apache.org/thrift.git/LANGUAGES.md index 2f68a4e6e..779d9b6bb 100644 --- a/vendor/git.apache.org/thrift.git/LANGUAGES.md +++ b/vendor/git.apache.org/thrift.git/LANGUAGES.md @@ -1,20 +1,20 @@ # Apache Thrift Language Support # -Last Modified: 2017-10-05
-Version: 0.11.0+ +Last Modified: 2018-03-06 Thrift supports many programming languages and has an impressive test suite that exercises most of the languages, protocols, and transports that represents a matrix of thousands of possible combinations. Each language typically has a minimum required version as well as support libraries - some mandatory and some optional. All of this information is provided below to help you assess whether you can use Apache Thrift with your project. Obviously this is a complex matrix to maintain and may not be correct in all cases - if you spot an error please inform the developers using the mailing list. -Apache Thrift has a choice of two build systems. The `autoconf` build system is the most complete build and is used to build all supported languages. The `cmake` build system has been designated by the project to replace `autoconf` however this transition will take quite some time to complete. +Apache Thrift has a choice of two build systems. The `autoconf` build system is the most complete build and is used to build all supported languages. The `cmake` build system has been designated by the project to replace `autoconf` however this transition will take quite some time to complete. -The Language/Library Levels indicate the minimum and maximum versions that are used in the [continuous integration environments](build/docker/README.md) (Appveyor, Travis) for Apache Thrift. Note that while a language may contain support for protocols, transports, and servers, the extent to which each is tested as part of the overall build process varies. The definitive integration test for the project is called the "cross" test which executes a test matrix with clients and servers communicating across languages. +The Language/Library Levels indicate the minimum and maximum versions that are used in the [continuous integration environments](build/docker/README.md) (Appveyor, Travis) for Apache Thrift. 
Other language levels may be supported for each language, however tested less thoroughly; check the README file inside each lib directory for additional details. Note that while a language may contain support for protocols, transports, and servers, the extent to which each is tested as part of the overall build process varies. The definitive integration test for the project is called the "cross" test which executes a test matrix with clients and servers communicating across languages. - +
+ - + @@ -33,6 +33,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -43,8 +44,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -53,8 +55,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -63,6 +66,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -73,6 +77,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -82,9 +87,21 @@ The Language/Library Levels indicate the minimum and maximum versions that are u - + + - + + + + + + + + + + + + @@ -93,8 +110,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -103,6 +121,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -113,8 +132,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u - - + + + @@ -123,8 +143,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -133,8 +154,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -143,8 +165,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -153,8 +176,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -163,8 +187,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -173,6 +198,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -183,6 +209,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -193,6 +220,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -203,9 +231,10 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - - + + @@ -213,8 +242,9 @@ The Language/Library Levels indicate the minimum and 
maximum versions that are u + - + @@ -223,8 +253,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -233,8 +264,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -243,8 +275,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -253,8 +286,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -263,8 +297,9 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + - + @@ -273,6 +308,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -285,6 +321,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u + @@ -295,7 +332,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u - + diff --git a/vendor/git.apache.org/thrift.git/Makefile.am b/vendor/git.apache.org/thrift.git/Makefile.am index 3d71fd4b7..cdb8bd2f5 100755 --- a/vendor/git.apache.org/thrift.git/Makefile.am +++ b/vendor/git.apache.org/thrift.git/Makefile.am @@ -54,7 +54,7 @@ empty := space := $(empty) $(empty) comma := , -CROSS_LANGS = @MAYBE_CPP@ @MAYBE_C_GLIB@ @MAYBE_D@ @MAYBE_JAVA@ @MAYBE_CSHARP@ @MAYBE_PYTHON@ @MAYBE_PY3@ @MAYBE_RUBY@ @MAYBE_HASKELL@ @MAYBE_PERL@ @MAYBE_PHP@ @MAYBE_GO@ @MAYBE_NODEJS@ @MAYBE_DART@ @MAYBE_ERLANG@ @MAYBE_LUA@ @MAYBE_RS@ @MAYBE_DOTNETCORE@ +CROSS_LANGS = @MAYBE_CPP@ @MAYBE_C_GLIB@ @MAYBE_CL@ @MAYBE_D@ @MAYBE_JAVA@ @MAYBE_CSHARP@ @MAYBE_PYTHON@ @MAYBE_PY3@ @MAYBE_RUBY@ @MAYBE_HASKELL@ @MAYBE_PERL@ @MAYBE_PHP@ @MAYBE_GO@ @MAYBE_NODEJS@ @MAYBE_DART@ @MAYBE_ERLANG@ @MAYBE_LUA@ @MAYBE_RS@ @MAYBE_DOTNETCORE@ CROSS_LANGS_COMMA_SEPARATED = $(subst $(space),$(comma),$(CROSS_LANGS)) if WITH_PY3 @@ -65,14 +65,14 @@ endif if WITH_PYTHON crossfeature: precross - $(CROSS_PY) test/test.py --retry-count 3 --features .* --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) + $(CROSS_PY) 
test/test.py --retry-count 5 --features .* --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) else # feature test needs python build crossfeature: endif cross-%: precross crossfeature - $(CROSS_PY) test/test.py --retry-count 3 --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) --client $(CROSS_LANGS_COMMA_SEPARATED) --regex "$*" + $(CROSS_PY) test/test.py --retry-count 5 --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) --client $(CROSS_LANGS_COMMA_SEPARATED) --regex "$*" cross: cross-.* diff --git a/vendor/git.apache.org/thrift.git/README.md b/vendor/git.apache.org/thrift.git/README.md index 543504116..a7ebdf954 100644 --- a/vendor/git.apache.org/thrift.git/README.md +++ b/vendor/git.apache.org/thrift.git/README.md @@ -1,7 +1,7 @@ Apache Thrift ============= -Last Modified: 2017-11-10 +Last Modified: 2017-11-11 License ======= diff --git a/vendor/git.apache.org/thrift.git/Thrift.podspec b/vendor/git.apache.org/thrift.git/Thrift.podspec index 6f8ebb73f..39d378053 100644 --- a/vendor/git.apache.org/thrift.git/Thrift.podspec +++ b/vendor/git.apache.org/thrift.git/Thrift.podspec @@ -1,6 +1,6 @@ Pod::Spec.new do |s| s.name = "Thrift" - s.version = "0.11.0" + s.version = "1.0.0" s.summary = "Apache Thrift is a lightweight, language-independent software stack with an associated code generation mechanism for RPC." s.description = <<-DESC The Apache Thrift software framework, for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages. 
@@ -13,6 +13,6 @@ The Apache Thrift software framework, for scalable cross-language services devel s.osx.deployment_target = '10.8' s.ios.framework = 'CFNetwork' s.osx.framework = 'CoreServices' - s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-0.11.0" } + s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-1.0.0" } s.source_files = 'lib/cocoa/src/**/*.{h,m,swift}' end \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/appveyor.yml b/vendor/git.apache.org/thrift.git/appveyor.yml index 073ffd492..121fe1cc7 100755 --- a/vendor/git.apache.org/thrift.git/appveyor.yml +++ b/vendor/git.apache.org/thrift.git/appveyor.yml @@ -19,45 +19,55 @@ # build Apache Thrift on AppVeyor - https://ci.appveyor.com -version: '0.11.0.{build}' +version: '1.0.0-dev.{build}' shallow_clone: true os: - - Visual Studio 2015 + - Visual Studio 2017 -cache: - - C:\projects\thrift\buildcache -> build\appveyor\MSVC-appveyor-install.bat - - C:\ProgramData\chocolatey\lib -> build\appveyor\MSVC-appveyor-install.bat - - C:\msys64\var\cache\pacman -> build\appveyor\MSYS-appveyor-install.bat +matrix: + allow_failures: + - PROFILE: CYGWIN + fast_finish: true environment: matrix: - - PROFILE: MSVC2010 - PLATFORM: x86 - CONFIGURATION: Debug - BOOST_VERSION: 1.54.0 - LIBEVENT_VERSION: 2.0.22 - QT_VERSION: 5.6 - ZLIB_VERSION: 1.2.8 - DISABLED_TESTS: StressTestNonBlocking - - - PROFILE: MSVC2015 + - PROFILE: MSVC2017 PLATFORM: x64 CONFIGURATION: Release - BOOST_VERSION: 1.64.0 - LIBEVENT_VERSION: 2.0.22 + BOOST_VERSION: 1.65.1 + LIBEVENT_VERSION: 2.1.8 PYTHON_VERSION: 3.6 - QT_VERSION: 5.8 + QT_VERSION: 5.10 ZLIB_VERSION: 1.2.11 DISABLED_TESTS: StressTestNonBlocking + - PROFILE: MSVC2013 + PLATFORM: x86 + CONFIGURATION: Release + BOOST_VERSION: 1.58.0 + LIBEVENT_VERSION: 2.0.22 + PYTHON_VERSION: 3.5 + QT_VERSION: 5.8 + ZLIB_VERSION: 1.2.8 + DISABLED_TESTS: StressTestNonBlocking + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 + - 
PROFILE: MINGW PLATFORM: x64 - CONFIGURATION: Release + CONFIGURATION: RelWithDebInfo + DISABLED_TESTS: StressTestNonBlocking -matrix: - fast_finish: true + - PROFILE: CYGWIN + PLATFORM: x86 + CONFIGURATION: RelWithDebInfo + DISABLED_TESTS: (ZlibTest|OpenSSLManualInitTest|TNonblockingServerTest|StressTestNonBlocking) + +# - PROFILE: CYGWIN +# PLATFORM: x64 +# CONFIGURATION: RelWithDebInfo +# DISABLED_TESTS: (ZlibTest|OpenSSLManualInitTest|TNonblockingServerTest|StressTestNonBlocking) install: - cd %APPVEYOR_BUILD_FOLDER% @@ -97,5 +107,4 @@ test_script: # # also need: # environment: -# APPVEYOR_RDP_PASSWORD: thr1FT2345$xyzZ - +# APPVEYOR_RDP_PASSWORD: thr1FT2345$xyzZ \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-build.bat b/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-build.bat new file mode 100644 index 000000000..c22622294 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-build.bat @@ -0,0 +1,36 @@ +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. 
+:: + +@ECHO OFF +SETLOCAL EnableDelayedExpansion + +CD build\appveyor || EXIT /B +CALL cl_banner_build.bat || EXIT /B +CALL cl_setenv.bat || EXIT /B + +SET CMAKEARGS=^ + -G'%GENERATOR%' ^ + -DCMAKE_BUILD_TYPE=%CONFIGURATION% ^ + -DCMAKE_INSTALL_PREFIX=%INSTDIR% ^ + -DCMAKE_CXX_EXTENSIONS=ON ^ + -DCMAKE_CXX_FLAGS="-D_GNU_SOURCE" ^ + -DCMAKE_CXX_STANDARD=11 ^ + -DWITH_PYTHON=OFF ^ + -DWITH_SHARED_LIB=OFF ^ + -DWITH_STATIC_LIB=ON ^ + -DWITH_STDTHREADS=ON + +@ECHO ON +%BASH% -lc "mkdir -p %BUILDDIR% && cd %BUILDDIR% && cmake.exe %SRCDIR% %CMAKEARGS% && cmake --build . --config %CONFIGURATION% --target install" || EXIT /B +@ECHO OFF diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-install.bat b/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-install.bat new file mode 100644 index 000000000..77db7d40a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-install.bat @@ -0,0 +1,34 @@ +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. 
+:: + +:: +:: Appveyor install script for CYGWIN +:: Installs third party packages we need for a cmake build +:: + +@ECHO OFF +SETLOCAL EnableDelayedExpansion + +CD build\appveyor || EXIT /B +CALL cl_banner_install.bat || EXIT /B +CALL cl_setenv.bat || EXIT /B +CALL cl_showenv.bat || EXIT /B + +:: +:: Install apt-cyg for package management +:: + +%BASH% -lc "wget rawgit.com/transcode-open/apt-cyg/master/apt-cyg && install apt-cyg /bin && rm -f apt-cyg" || EXIT /B +%BASH% -lc "apt-cyg update" || EXIT /B +%BASH% -lc "apt-cyg install bison cmake flex gcc-g++ libboost-devel libevent-devel make openssl-devel zlib-devel" diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-test.bat b/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-test.bat new file mode 100644 index 000000000..b667f9bb1 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/build/appveyor/CYGW-appveyor-test.bat @@ -0,0 +1,21 @@ +:: +:: Licensed under the Apache License, Version 2.0 (the "License"); +:: you may not use this file except in compliance with the License. +:: You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. 
+:: + +@ECHO OFF +SETLOCAL EnableDelayedExpansion +CD build\appveyor || EXIT /B +CALL cl_banner_test.bat || EXIT /B +CALL cl_setenv.bat || EXIT /B + +%BASH% -lc "cd %BUILDDIR% && ctest.exe -C %CONFIGURATION% --timeout 300 -VV -E '%DISABLED_TESTS%'" || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-build.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-build.bat index 838e42880..6ebdb906e 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-build.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-build.bat @@ -22,15 +22,14 @@ CALL cl_setenv.bat || EXIT /B SET CMAKEARGS=^ -G'%GENERATOR%' ^ -DCMAKE_BUILD_TYPE=%CONFIGURATION% ^ - -DCMAKE_INSTALL_PREFIX=%INSTDIR_MSYS% ^ - -DCMAKE_MAKE_PROGRAM=/mingw64/bin/mingw32-make ^ - -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc.exe ^ - -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++.exe ^ - -DWITH_LIBEVENT=OFF ^ + -DCMAKE_INSTALL_PREFIX=%INSTDIR% ^ + -DCMAKE_MAKE_PROGRAM=/mingw%NORM_PLATFORM%/bin/mingw32-make ^ + -DCMAKE_C_COMPILER=/mingw%NORM_PLATFORM%/bin/gcc.exe ^ + -DCMAKE_CXX_COMPILER=/mingw%NORM_PLATFORM%/bin/g++.exe ^ -DWITH_PYTHON=OFF ^ -DWITH_SHARED_LIB=OFF ^ -DWITH_STATIC_LIB=ON @ECHO ON -%BASH% -lc "mkdir -p %BUILDDIR_MSYS% && cd %BUILDDIR_MSYS% && cmake.exe %SRCDIR_MSYS% %CMAKEARGS% && cmake --build . --config %CONFIGURATION% --target install" || EXIT /B +%BASH% -lc "mkdir -p %BUILDDIR% && cd %BUILDDIR% && cmake.exe %SRCDIR% %CMAKEARGS% && cmake --build . 
--config %CONFIGURATION% --target install" || EXIT /B @ECHO OFF diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-install.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-install.bat index 0d5f99e4d..ce8fc7d12 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-install.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-install.bat @@ -13,9 +13,43 @@ :: :: -:: Appveyor install script for MinGW -:: Installs (or builds) third party packages we need +:: Appveyor install script for MINGW on MSYS2 +:: Installs third party packages we need for a cmake build :: -:: Same as the MSYS installation requirements -CALL build\appveyor\MSYS-appveyor-install.bat +@ECHO OFF +SETLOCAL EnableDelayedExpansion + +CD build\appveyor || EXIT /B +CALL cl_banner_install.bat || EXIT /B +CALL cl_setenv.bat || EXIT /B +CALL cl_showenv.bat || EXIT /B + +:: We're going to keep boost at a version cmake understands +SET BOOSTVER=1.64.0-3 +SET BOOSTPKG=mingw-w64-%MINGWPLAT%-boost-%BOOSTVER%-any.pkg.tar.xz +SET IGNORE=--ignore mingw-w64-x86_64-boost --ignore mingw-w64-i686-boost + +SET PACKAGES=^ + --needed -S bison flex make ^ + mingw-w64-%MINGWPLAT%-cmake ^ + mingw-w64-%MINGWPLAT%-libevent ^ + mingw-w64-%MINGWPLAT%-openssl ^ + mingw-w64-%MINGWPLAT%-toolchain ^ + mingw-w64-%MINGWPLAT%-zlib + +::mingw-w64-%MINGWPLAT%-qt5 : WAY too large (1GB download!) 
- tested in cygwin builds anyway + +:: Remove old packages that no longer exist to avoid an error +%BASH% -lc "pacman --noconfirm --remove libcatgets catgets || true" || EXIT /B + +:: Upgrade things +%BASH% -lc "pacman --noconfirm -Syu %IGNORE%" || EXIT /B +%BASH% -lc "pacman --noconfirm -Su %IGNORE%" || EXIT /B +%BASH% -lc "pacman --noconfirm %PACKAGES%" || EXIT /B + +:: Install a slightly older boost (BOOSTVER) as cmake in mingw +:: does not have built-in dependencies for boost 1.66.0 yet +:: -- this cuts down on build warning output -- + +%BASH% -lc "if [[ $(pacman --query | grep '%MINGWPLAT%-boost') ^!= *"%BOOSTVER%"* ]]; then wget http://repo.msys2.org/mingw/%MINGWPLAT%/%BOOSTPKG% && pacman --noconfirm --needed -U %BOOSTPKG% && rm %BOOSTPKG%; fi" || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-test.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-test.bat index c37c72a9c..499c1ff80 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-test.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MING-appveyor-test.bat @@ -12,5 +12,11 @@ :: limitations under the License. 
:: -:: Same as MSYS2 -CALL build\appveyor\MSYS-appveyor-test.bat +@ECHO OFF +SETLOCAL EnableDelayedExpansion + +CD build\appveyor || EXIT /B +CALL cl_banner_test.bat || EXIT /B +CALL cl_setenv.bat || EXIT /B + +%BASH% -lc "cd %BUILDDIR% && ctest.exe -C %CONFIGURATION% --timeout 300 -VV -E '%DISABLED_TESTS%'" || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-install.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-install.bat index 573700e0c..95008bd52 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-install.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-install.bat @@ -26,47 +26,35 @@ CALL cl_setenv.bat || EXIT /B CALL cl_showenv.bat || EXIT /B MKDIR "%WIN3P%" || EXIT /B -:: Install ant - this also installs the latest JDK as a dependency -:: The installation of JDK requires us to pick up PATH and JAVE_HOME from the registry -cinst -c "%BUILDCACHE%" -y ant || EXIT /B +choco feature enable -n allowGlobalConfirmation || EXIT /B -:: Install bison and flex +:: Things to install when NOT running in appveyor: +IF "%APPVEYOR_BUILD_ID%" == "" ( + cup -y chocolatey || EXIT /B + cinst -c "%BUILDCACHE%" -y curl || EXIT /B + cinst -c "%BUILDCACHE%" -y 7zip || EXIT /B + cinst -c "%BUILDCACHE%" -y python3 || EXIT /B + cinst -c "%BUILDCACHE%" -y openssl.light || EXIT /B +) + +cinst -c "%BUILDCACHE%" -y jdk8 || EXIT /B cinst -c "%BUILDCACHE%" -y winflexbison3 || EXIT /B -:: zlib +:: zlib - not available through chocolatey CD "%APPVEYOR_SCRIPTS%" || EXIT /B call build-zlib.bat || EXIT /B -:: libevent +:: libevent - not available through chocolatey CD "%APPVEYOR_SCRIPTS%" || EXIT /B call build-libevent.bat || EXIT /B -:: python packages -pip install backports.ssl_match_hostname ^ +:: python packages (correct path to pip set in cl_setenv.bat) +pip.exe ^ + install backports.ssl_match_hostname ^ ipaddress ^ + six ^ tornado ^ twisted || EXIT /B -:: msinttypes - for MSVC2010 only -SET 
MSINTTYPESURL=https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/msinttypes/msinttypes-r26.zip -IF "%COMPILER%" == "vc100" ( - MKDIR "%WIN3P%\msinttypes" || EXIT /B - CD "%WIN3P%\msinttypes" || EXIT /B - appveyor DownloadFile "%MSINTTYPESURL%" || EXIT /B - 7z x "msinttypes-r26.zip" || EXIT /B -) - -:: appveyor build slaves do not have MSVC2010 Boost installed -IF "%COMPILER%" == "vc100" ( - SET BITS=64 - IF "%PLATFORM%" == "x86" ( - SET BITS=32 - ) - SET BOOSTEXEURL=https://downloads.sourceforge.net/project/boost/boost-binaries/%BOOST_VERSION%/boost_%BOOST_VERSION:.=_%-msvc-10.0-!BITS!.exe - SET BOOSTEXE=C:\projects\thrift\buildcache\boost_%BOOST_VERSION:.=_%-msvc-10.0-!BITS!.exe - appveyor DownloadFile "!BOOSTEXEURL!" -FileName "!BOOSTEXE!" || EXIT /B - "!BOOSTEXE!" /dir=C:\Libraries\boost_%BOOST_VERSION:.=_% /silent || EXIT /B -) - :: Haskell (GHC) and cabal cinst -c "%BUILDCACHE%" -y ghc || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-test.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-test.bat index 16ee2078e..35945792a 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-test.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MSVC-appveyor-test.bat @@ -12,14 +12,21 @@ :: limitations under the License. :: -@ECHO OFF +@ECHO ON SETLOCAL EnableDelayedExpansion CD build\appveyor || EXIT /B CALL cl_banner_test.bat || EXIT /B CALL cl_setenv.bat || EXIT /B CD "%BUILDDIR%" || EXIT /B -:: Add directories to the path to find DLLs of third party libraries so tests run -SET PATH=%BOOST_LIBRARYDIR%;%OPENSSL_ROOT%\bin;%WIN3P%\zlib-inst\bin;%PATH% +DIR C:\libraries +DIR C:\libraries\boost_1_59_0 +DIR C:\libraries\boost_1_60_0 +DIR C:\libraries\boost_1_62_0 +DIR C:\libraries\boost_1_63_0 +DIR C:\libraries\boost_1_64_0 + +:: Add directories to the path to find DLLs of third party libraries so tests run properly! 
+SET PATH=%BOOST_LIBRARYDIR:/=\%;%OPENSSL_ROOT%\bin;%WIN3P%\zlib-inst\bin;%PATH% ctest -C %CONFIGURATION% --timeout 300 -VV -E "(%DISABLED_TESTS%)" || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-build.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-build.bat index b9d8955e2..44017295e 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-build.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-build.bat @@ -33,12 +33,13 @@ SET CMAKEARGS=-G\"%GENERATOR%\" ^ -DCMAKE_CXX_COMPILER=g++.exe ^ -DCMAKE_MAKE_PROGRAM=make.exe ^ -DCMAKE_INSTALL_PREFIX=%INSTDIR_MSYS% ^ + -DLIBEVENT_ROOT=%LIBEVENT_ROOT% ^ -DOPENSSL_LIBRARIES=%OPENSSL_LIBRARIES% ^ -DOPENSSL_ROOT_DIR=%OPENSSL_ROOT% ^ -DOPENSSL_USE_STATIC_LIBS=ON ^ -DWITH_BOOST_STATIC=ON ^ -DWITH_JAVA=OFF ^ - -DWITH_LIBEVENT=OFF ^ + -DWITH_LIBEVENT=ON ^ -DWITH_PYTHON=%WITH_PYTHON% ^ -DWITH_SHARED_LIB=OFF ^ -DWITH_STATIC_LIB=ON diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-install.bat b/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-install.bat index ff43cd371..a818df305 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-install.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/MSYS-appveyor-install.bat @@ -25,17 +25,24 @@ CALL cl_banner_install.bat || EXIT /B CALL cl_setenv.bat || EXIT /B CALL cl_showenv.bat || EXIT /B +:: We're going to keep boost at a version cmake understands +SET BOOSTPKG=mingw-w64-x86_64-boost-1.64.0-3-any.pkg.tar.xz +SET IGNORE=--ignore mingw-w64-x86_64-boost + SET PACKAGES=^ - --needed -S bison flex ^ - make ^ - mingw-w64-x86_64-boost ^ + --needed -S bison flex make ^ mingw-w64-x86_64-cmake ^ + mingw-w64-x86_64-libevent ^ mingw-w64-x86_64-openssl ^ mingw-w64-x86_64-toolchain ^ mingw-w64-x86_64-zlib -:: omitting libevent-devel for now it is version 2.1.4 and doesn't play nice with MinGW +%BASH% -lc "pacman --noconfirm -Syu %IGNORE%" 
|| EXIT /B +%BASH% -lc "pacman --noconfirm -Su %IGNORE%" || EXIT /B +%BASH% -lc "pacman --noconfirm %PACKAGES%" || EXIT /B + +:: Install a slightly older boost (1.64.0) as cmake 3.10 +:: does not have built-in dependencies for boost 1.66.0 yet +:: -- this cuts down on build warning output -- +%BASH% -lc "wget http://repo.msys2.org/mingw/x86_64/%BOOSTPKG% && pacman --noconfirm --needed -U %BOOSTPKG% && rm %BOOSTPKG%" || EXIT /B -%BASH% -lc "pacman --noconfirm -Syu" || EXIT /B -%BASH% -lc "pacman --noconfirm -Su" || EXIT /B -%BASH% -lc "pacman --noconfirm %PACKAGES%" || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/build-libevent.bat b/vendor/git.apache.org/thrift.git/build/appveyor/build-libevent.bat index 13c74ee15..64b635b1e 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/build-libevent.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/build-libevent.bat @@ -17,14 +17,21 @@ SETLOCAL EnableDelayedExpansion SET URLFILE=libevent-%LIBEVENT_VERSION%-stable.tar.gz SET URL=https://github.com/libevent/libevent/releases/download/release-%LIBEVENT_VERSION%-stable/%URLFILE% -CD %WIN3P% || EXIT /B -appveyor DownloadFile %URL% || EXIT /B -7z x %URLFILE% -so | 7z x -si -ttar > nul || EXIT /B +:: Download - support running a local build or a build in appveyor +CD "%WIN3P%" || EXIT /B +IF "%APPVEYOR_BUILD_ID%" == "" ( + curl -L -f -o "%URLFILE%" "%URL%" +) ELSE ( + appveyor DownloadFile "%URL%" +) +7z x "%URLFILE%" -so | 7z x -si -ttar > nul || EXIT /B CD "libevent-%LIBEVENT_VERSION%-stable" || EXIT /B -nmake -f Makefile.nmake || EXIT /B +nmake -f Makefile.nmake static_libs || EXIT /B + +:: in libevent 2.0 there is no nmake subdirectory in WIN32-Code, but in 2.1 there is mkdir lib || EXIT /B move *.lib lib\ || EXIT /B -move WIN32-Code\event2\* include\event2\ || EXIT /B +move WIN32-Code\event2\* include\event2\ || move WIN32-Code\nmake\event2\* include\event2\ || EXIT /B move *.h include\ || EXIT /B ENDLOCAL diff --git 
a/vendor/git.apache.org/thrift.git/build/appveyor/build-zlib.bat b/vendor/git.apache.org/thrift.git/build/appveyor/build-zlib.bat index d8811a153..9195726d5 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/build-zlib.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/build-zlib.bat @@ -24,11 +24,18 @@ SET URLFILE=%PACKAGE%.tar.gz SET URL=http://zlib.net/%URLFILE% SET FURL=http://zlib.net/fossils/%URLFILE% -:: Download +:: Download - support running a local build or a build in appveyor CD "%WIN3P%" || EXIT /B -appveyor DownloadFile "%URL%" -IF ERRORLEVEL 1 ( - appveyor DownloadFile "%FURL%" || EXIT /B +IF "%APPVEYOR_BUILD_ID%" == "" ( + curl -L -f -o "%URLFILE%" "%URL%" + IF ERRORLEVEL 1 ( + curl -L -f -o "%URLFILE%" "%FURL%" + ) +) ELSE ( + appveyor DownloadFile "%URL%" + IF ERRORLEVEL 1 ( + appveyor DownloadFile "%FURL%" || EXIT /B + ) ) 7z x "%URLFILE%" -so | 7z x -si -ttar > nul || EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/cl_setcompiler.bat b/vendor/git.apache.org/thrift.git/build/appveyor/cl_setcompiler.bat index b97da7359..733ffc538 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/cl_setcompiler.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/cl_setcompiler.bat @@ -20,7 +20,7 @@ :: vc110 = Visual Studio 2012 :: vc120 = Visual Studio 2013 :: vc140 = Visual Studio 2015 -:: vc150 = Visual Studio 2017 +:: vc141 = Visual Studio 2017 :: :: Honors any existing COMPILER environment variable :: setting instead of overwriting it, to allow it @@ -44,10 +44,10 @@ IF NOT "%PROFILE:~0,4%" == "MSVC" ( IF !ERRORLEVEL! == 0 (SET COMPILER=vc110) CALL :CHECK 18 IF !ERRORLEVEL! == 0 (SET COMPILER=vc120) - CALL :CHECK 19.00 + CALL :CHECK 19.0 IF !ERRORLEVEL! == 0 (SET COMPILER=vc140) - CALL :CHECK 19.10 - IF !ERRORLEVEL! == 0 (SET COMPILER=vc150) + CALL :CHECK 19.1 + IF !ERRORLEVEL! 
== 0 (SET COMPILER=vc141) ) IF NOT DEFINED COMPILER ( @@ -59,5 +59,5 @@ ECHO [info ] detected compiler edition %COMPILER% EXIT /B 0 :CHECK -cl /? 2>&1 | findstr /C:"Version %1%." > nul +cl /? 2>&1 | findstr /C:"Version %1%" > nul EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/cl_setenv.bat b/vendor/git.apache.org/thrift.git/build/appveyor/cl_setenv.bat index e80d6b569..10af2d347 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/cl_setenv.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/cl_setenv.bat @@ -12,6 +12,8 @@ :: limitations under the License. :: +@ECHO OFF + IF "%PROFILE%" == "MSVC2010" ( CALL "C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\vcvarsall.bat" %PLATFORM% ) ELSE IF "%PROFILE%" == "MSVC2012" ( @@ -21,11 +23,11 @@ ) ELSE IF "%PROFILE%" == "MSVC2015" ( CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" %PLATFORM% ) ELSE IF "%PROFILE%" == "MSVC2017" ( - CALL "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\Common7\Tools\VsDevCmd.bat" %PLATFORM% + CALL :SETUPNEWERMSVC || EXIT /B ) ELSE IF "%PROFILE%" == "MINGW" ( - SET MSYS2_PATH_TYPE=stock -) ELSE IF "%PROFILE%" == "MSYS" ( - SET MSYS2_PATH_TYPE=stock + REM Supported, nothing special to do here. +) ELSE IF "%PROFILE%" == "CYGWIN" ( + REM Supported, nothing special to do here. 
) ELSE ( ECHO Unsupported PROFILE=%PROFILE% or PLATFORM=%PLATFORM% EXIT /B 1 @@ -35,58 +37,93 @@ CALL cl_setcompiler.bat || EXIT /B CALL cl_setgenerator.bat || EXIT /B SET APPVEYOR_SCRIPTS=%APPVEYOR_BUILD_FOLDER%\build\appveyor -SET BUILDCACHE=%APPVEYOR_BUILD_FOLDER%\buildcache -SET BUILDDIR=%APPVEYOR_BUILD_FOLDER%\local-thrift-build -SET INSTDIR=%APPVEYOR_BUILD_FOLDER%\local-thrift-inst +SET BUILDCACHE=%APPVEYOR_BUILD_FOLDER%\..\build\cache +SET BUILDDIR=%APPVEYOR_BUILD_FOLDER%\..\build\%PROFILE%\%PLATFORM% +SET INSTDIR=%APPVEYOR_BUILD_FOLDER%\..\build\%PROFILE%\%PLATFORM% SET SRCDIR=%APPVEYOR_BUILD_FOLDER% -: PLATFORM is x64 or x86, but we want x86 to become "32" when we strip it down for paths: +:: PLATFORM is x64 or x86 +:: NORM_PLATFORM is 64 or 32 SET NORM_PLATFORM=%PLATFORM:~-2,2% IF "%NORM_PLATFORM%" == "86" (SET NORM_PLATFORM=32) -:: FindBoost needs forward slashes so cmake doesn't see something as an escaped character -SET BOOST_ROOT=C:/Libraries/boost_%BOOST_VERSION:.=_% -SET BOOST_LIBRARYDIR=%BOOST_ROOT%/lib%NORM_PLATFORM%-msvc-%COMPILER:~-3,2%.0 -SET OPENSSL_ROOT=C:\OpenSSL-Win%NORM_PLATFORM% -SET WIN3P=%APPVEYOR_BUILD_FOLDER%\thirdparty +IF "%PROFILE:~0,4%" == "MSVC" ( -:: MSVC2010 doesn't "do" std::thread -IF "%COMPILER%" == "vc100" ( - SET THREADMODEL=BOOST -) ELSE ( - SET THREADMODEL=STD -) + :: FindBoost needs forward slashes so cmake doesn't see something as an escaped character + SET BOOST_ROOT=C:/Libraries/boost_%BOOST_VERSION:.=_% + SET BOOST_LIBRARYDIR=!BOOST_ROOT!/lib%NORM_PLATFORM%-msvc-%COMPILER:~-3,2%.%COMPILER:~-1,1% + SET OPENSSL_ROOT=C:\OpenSSL-Win%NORM_PLATFORM% + SET WIN3P=%APPVEYOR_BUILD_FOLDER%\thirdparty -IF "%PYTHON_VERSION%" == "" ( - SET WITH_PYTHON=OFF -) ELSE ( - SET WITH_PYTHON=ON - SET PATH=C:\Python%PYTHON_VERSION:.=%\scripts;C:\Python%PYTHON_VERSION:.=%;!PATH! 
-) -IF "%CONFIGURATION%" == "Debug" (SET ZLIB_LIB_SUFFIX=d) + :: MSVC2010 doesn't "do" std::thread + IF "%COMPILER%" == "vc100" ( + SET THREADMODEL=BOOST + ) ELSE ( + SET THREADMODEL=STD + ) -IF NOT "%QT_VERSION%" == "" ( - IF /i "%PLATFORM%" == "x64" SET QTEXT=_64 - SET PATH=C:\Qt\%QT_VERSION%\%PROFILE%!QTEXT!\bin;!PATH! -) + IF "%PYTHON_VERSION%" == "" ( + SET WITH_PYTHON=OFF + ) ELSE ( + SET WITH_PYTHON=ON + IF /i "%PLATFORM%" == "x64" SET PTEXT=-x64 + SET PATH=C:\Python%PYTHON_VERSION:.=%!PTEXT!\scripts;C:\Python%PYTHON_VERSION:.=%!PTEXT!;!PATH! + ) + IF "%CONFIGURATION%" == "Debug" (SET ZLIB_LIB_SUFFIX=d) -IF NOT "%PROFILE:~0,4%" == "MSVC" ( + IF NOT "%QT_VERSION%" == "" ( + IF /i "%PLATFORM%" == "x64" SET QTEXT=_64 + SET PATH=C:\Qt\%QT_VERSION%\%PROFILE%!QTEXT!\bin;!PATH! + ) + +) ELSE IF "%PROFILE:~0,4%" == "MING" ( + + :: PLATFORM = x86 means MINGWPLAT i686 + :: PLATFORM = x64 means MINGWPLAT x86_64 + SET MINGWPLAT=x86_64 + IF "%PLATFORM%" == "x86" (SET MINGWPLAT=i686) SET BASH=C:\msys64\usr\bin\bash.exe - SET BOOST_ROOT= - SET BOOST_INCLUDEDIR=/mingw64/include - SET BOOST_LIBRARYDIR=/mingw64/lib - SET OPENSSL_LIBRARIES=/mingw64/lib - SET OPENSSL_ROOT=/mingw64 - SET WIN3P= + !BASH! -lc "sed -i '/export PATH=\/mingw32\/bin/d' ~/.bash_profile && sed -i '/export PATH=\/mingw64\/bin/d' ~/.bash_profile && echo 'export PATH=/mingw%NORM_PLATFORM%/bin:$PATH' >> ~/.bash_profile" || EXIT /B - !BASH! -lc "sed -i '/export PATH=\/mingw64\/bin/d' ~/.bash_profile && echo 'export PATH=/mingw64/bin:$PATH' >> ~/.bash_profile" || EXIT /B + SET BUILDDIR=%BUILDDIR:\=/% + SET BUILDDIR=/c!BUILDDIR:~2! + SET INSTDIR=%INSTDIR:\=/% + SET INSTDIR=/c!INSTDIR:~2! + SET SRCDIR=%SRCDIR:\=/% + SET SRCDIR=/c!SRCDIR:~2! 
+ +) ELSE IF "%PROFILE:~0,4%" == "CYGW" ( + + SET CYGWINROOT=C:\cygwin + IF "%PLATFORM%" == "x64" (SET CYGWINROOT=!CYGWINROOT!64) + + SET BASH=!CYGWINROOT!\bin\bash.exe + SET SETUP=!CYGWINROOT!\setup-x86 + IF "%PLATFORM%" == "x64" (SET SETUP=!SETUP!_64) + SET SETUP=!SETUP!.exe + + SET BUILDDIR=%BUILDDIR:\=/% + SET BUILDDIR=/cygdrive/c!BUILDDIR:~2! + SET INSTDIR=%INSTDIR:\=/% + SET INSTDIR_CYG=/cygdrive/c!INSTDIR:~2! + SET SRCDIR=%SRCDIR:\=/% + SET SRCDIR=/cygdrive/c!SRCDIR:~2! ) -SET BUILDDIR_MSYS=%BUILDDIR:\=/% -SET BUILDDIR_MSYS=/c%BUILDDIR_MSYS:~2% -SET INSTDIR_MSYS=%INSTDIR:\=/% -SET INSTDIR_MSYS=/c%INSTDIR_MSYS:~2% -SET SRCDIR_MSYS=%SRCDIR:\=/% -SET SRCDIR_MSYS=/c%SRCDIR_MSYS:~2% +GOTO :EOF + +:SETUPNEWERMSVC + FOR /F "USEBACKQ TOKENS=*" %%i IN (`call "%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -version "[15.0,16.0)" -property installationPath`) DO ( + IF "%MSVCROOT%" == "" (SET MSVCROOT=%%i) + ) + SET MSVCPLAT=x86 + IF "%PLATFORM%" == "x64" (SET MSVCPLAT=amd64) + + SET CURRENTDIR=%CD% + CALL "!MSVCROOT!\Common7\Tools\VsDevCmd.bat" -arch=!MSVCPLAT! || EXIT /B + CD %CURRENTDIR% + EXIT /B + +:EOF diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/cl_setgenerator.bat b/vendor/git.apache.org/thrift.git/build/appveyor/cl_setgenerator.bat index 7ca98530f..5eb6ff31f 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/cl_setgenerator.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/cl_setgenerator.bat @@ -28,6 +28,7 @@ :: Optional [arch] can be "Win64" or "IA64". 
:: MinGW Makefiles = Generates makefiles for MinGW :: MSYS Makefiles = Generates makefiles for MSYS +:: Unix Makefiles = Generates makefiles for CYGWIN :: :: Honors any existing GENERATOR environment variable :: setting instead of overwriting it, to allow it @@ -45,6 +46,10 @@ IF DEFINED GENERATOR ( IF "%PROFILE:~0,4%" == "MING" ( SET GENERATOR=MinGW Makefiles + +) ELSE IF "%PROFILE:~0,4%" == "CYGW" ( + SET GENERATOR=Unix Makefiles + ) ELSE IF "%PROFILE:~0,4%" == "MSYS" ( SET GENERATOR=MSYS Makefiles ) ELSE ( @@ -55,9 +60,9 @@ IF "%PROFILE:~0,4%" == "MING" ( IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 11 2012!GENARCH! CALL :CHECK 18 IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 12 2013!GENARCH! - CALL :CHECK 19.00 + CALL :CHECK 19.0 IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 14 2015!GENARCH! - CALL :CHECK 19.10 + CALL :CHECK 19.1 IF !ERRORLEVEL! == 0 SET GENERATOR=Visual Studio 15 2017!GENARCH! ) @@ -70,5 +75,5 @@ ECHO [info ] using CMake generator %GENERATOR% EXIT /B 0 :CHECK -cl /? 2>&1 | findstr /C:"Version %1%." > nul +cl /? 
2>&1 | findstr /C:"Version %1%" > nul EXIT /B diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/cl_showenv.bat b/vendor/git.apache.org/thrift.git/build/appveyor/cl_showenv.bat index 33dd66072..3dda546e5 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/cl_showenv.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/cl_showenv.bat @@ -34,6 +34,7 @@ ECHO/ ECHO Our Variables ECHO ------------------------------------------------------------------------------- ECHO APPVEYOR_SCRIPTS = %APPVEYOR_SCRIPTS% +ECHO BASH = %BASH% ECHO BOOST_ROOT = %BOOST_ROOT% ECHO BOOST_INCLUDEDIR = %BOOST_INCLUDEDIR% ECHO BOOST_LIBRARYDIR = %BOOST_LIBRARYDIR% @@ -44,20 +45,16 @@ ECHO GENERATOR = %GENERATOR% ECHO INSTDIR = %INSTDIR% ECHO JAVA_HOME = %JAVA_HOME% ECHO OPENSSL_ROOT = %OPENSSL_ROOT% +ECHO SETUP = %SETUP% ECHO SRCDIR = %SRCDIR% ECHO WIN3P = %WIN3P% ECHO WITH_PYTHON = %WITH_PYTHON% ECHO ZLIB_STATIC_SUFFIX = %ZLIB_STATIC_SUFFIX% IF NOT "%PROFILE:~0,4%" == "MSVC" ( -ECHO/ -ECHO MSYS2/MinGW -ECHO ------------------------------------------------------------------------------- -ECHO BUILDDIR_MSYS = %BUILDDIR_MSYS% -ECHO INSTDIR_MSYS = %INSTDIR_MSYS% -ECHO MSYS2_PATH_TYPE = %MSYS2_PATH_TYPE% -ECHO SRCDIR_MSYS = %SRCDIR_MSYS% -ECHO PATH = -C:\msys64\usr\bin\bash -lc "echo $PATH" + ECHO/ + ECHO UNIXy PATH + ECHO ------------------------------------------------------------------------------- + %BASH% -lc "echo $PATH" ) ECHO/ ECHO Windows PATH diff --git a/vendor/git.apache.org/thrift.git/build/appveyor/simulate-appveyor.bat b/vendor/git.apache.org/thrift.git/build/appveyor/simulate-appveyor.bat index b32c0da12..8674f40b7 100644 --- a/vendor/git.apache.org/thrift.git/build/appveyor/simulate-appveyor.bat +++ b/vendor/git.apache.org/thrift.git/build/appveyor/simulate-appveyor.bat @@ -16,7 +16,7 @@ :: Helps build thrift by pretending to be appveyor :: Usage: :: cd build\appveyor -:: simulate-appveyor.bat [Debug|Release] [x86|x64] [MINGW|MSVC2015] +:: 
simulate-appveyor.bat [Debug|Release] [x86|x64] [CYGWIN|MINGW|MSVC201?] :: @ECHO OFF diff --git a/vendor/git.apache.org/thrift.git/build/cmake/ConfigureChecks.cmake b/vendor/git.apache.org/thrift.git/build/cmake/ConfigureChecks.cmake index 12a50df91..6b9c6a32f 100644 --- a/vendor/git.apache.org/thrift.git/build/cmake/ConfigureChecks.cmake +++ b/vendor/git.apache.org/thrift.git/build/cmake/ConfigureChecks.cmake @@ -34,14 +34,16 @@ check_include_file(getopt.h HAVE_GETOPT_H) check_include_file(inttypes.h HAVE_INTTYPES_H) check_include_file(netdb.h HAVE_NETDB_H) check_include_file(netinet/in.h HAVE_NETINET_IN_H) +check_include_file(signal.h HAVE_SIGNAL_H) check_include_file(stdint.h HAVE_STDINT_H) check_include_file(unistd.h HAVE_UNISTD_H) check_include_file(pthread.h HAVE_PTHREAD_H) -check_include_file(sys/time.h HAVE_SYS_TIME_H) +check_include_file(sys/ioctl.h HAVE_SYS_IOCTL_H) check_include_file(sys/param.h HAVE_SYS_PARAM_H) check_include_file(sys/resource.h HAVE_SYS_RESOURCE_H) check_include_file(sys/socket.h HAVE_SYS_SOCKET_H) check_include_file(sys/stat.h HAVE_SYS_STAT_H) +check_include_file(sys/time.h HAVE_SYS_TIME_H) check_include_file(sys/un.h HAVE_SYS_UN_H) check_include_file(sys/poll.h HAVE_SYS_POLL_H) check_include_file(sys/select.h HAVE_SYS_SELECT_H) diff --git a/vendor/git.apache.org/thrift.git/build/cmake/DefineCMakeDefaults.cmake b/vendor/git.apache.org/thrift.git/build/cmake/DefineCMakeDefaults.cmake index bb3239980..28999379f 100644 --- a/vendor/git.apache.org/thrift.git/build/cmake/DefineCMakeDefaults.cmake +++ b/vendor/git.apache.org/thrift.git/build/cmake/DefineCMakeDefaults.cmake @@ -35,7 +35,7 @@ set(CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE ON) set(CMAKE_COLOR_MAKEFILE ON) # Define the generic version of the libraries here -set(GENERIC_LIB_VERSION "0.11.0") +set(GENERIC_LIB_VERSION "0.1.0") set(GENERIC_LIB_SOVERSION "0") # Set the default build type to release with debug info diff --git 
a/vendor/git.apache.org/thrift.git/build/cmake/DefineOptions.cmake b/vendor/git.apache.org/thrift.git/build/cmake/DefineOptions.cmake index f1ea7bbc3..f419229a0 100644 --- a/vendor/git.apache.org/thrift.git/build/cmake/DefineOptions.cmake +++ b/vendor/git.apache.org/thrift.git/build/cmake/DefineOptions.cmake @@ -127,10 +127,10 @@ if(ANDROID) CMAKE_DEPENDENT_OPTION(BUILD_JAVA "Build Java library" ON "BUILD_LIBRARIES;WITH_JAVA;GRADLE_FOUND" OFF) else() + find_package(Gradlew QUIET) find_package(Java QUIET) - find_package(Ant QUIET) CMAKE_DEPENDENT_OPTION(BUILD_JAVA "Build Java library" ON - "BUILD_LIBRARIES;WITH_JAVA;JAVA_FOUND;ANT_FOUND" OFF) + "BUILD_LIBRARIES;WITH_JAVA;JAVA_FOUND;GRADLEW_FOUND" OFF) endif() # Python @@ -193,7 +193,7 @@ if(ANDROID) MESSAGE_DEP(GRADLE_FOUND "Gradle missing") else() MESSAGE_DEP(JAVA_FOUND "Java Runtime missing") - MESSAGE_DEP(ANT_FOUND "Ant missing") + MESSAGE_DEP(GRADLEW_FOUND "Gradle Wrapper missing") endif() message(STATUS " Build Python library: ${BUILD_PYTHON}") MESSAGE_DEP(WITH_PYTHON "Disabled by WITH_PYTHON=OFF") diff --git a/vendor/git.apache.org/thrift.git/build/cmake/DefinePlatformSpecifc.cmake b/vendor/git.apache.org/thrift.git/build/cmake/DefinePlatformSpecifc.cmake index f4434891c..8af4b6c91 100644 --- a/vendor/git.apache.org/thrift.git/build/cmake/DefinePlatformSpecifc.cmake +++ b/vendor/git.apache.org/thrift.git/build/cmake/DefinePlatformSpecifc.cmake @@ -90,6 +90,7 @@ elseif(UNIX) endif() add_definitions("-D__STDC_FORMAT_MACROS") +add_definitions("-D__STDC_LIMIT_MACROS") # WITH_*THREADS selects which threading library to use if(WITH_BOOSTTHREADS) diff --git a/vendor/git.apache.org/thrift.git/build/cmake/FindGradlew.cmake b/vendor/git.apache.org/thrift.git/build/cmake/FindGradlew.cmake new file mode 100644 index 000000000..17bb99899 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/build/cmake/FindGradlew.cmake @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor 
license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +# GRADLEW_FOUND - system has Gradlew +# GRADLEW_EXECUTABLE - the Gradlew executable +# +# It will search the location CMAKE_SOURCE_DIR/lib/java + +include(FindPackageHandleStandardArgs) + +find_program(GRADLEW_EXECUTABLE gradlew PATHS ${CMAKE_SOURCE_DIR}/lib/java NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) +find_package_handle_standard_args(Gradlew DEFAULT_MSG GRADLEW_EXECUTABLE) +mark_as_advanced(GRADLEW_EXECUTABLE) + +# Buggy find_program cannot find gradlew.bat when gradlew is at the same path +# and even buggier ctest will not execute gradlew.bat when gradlew is given. +if(CMAKE_HOST_WIN32) + string(REGEX REPLACE "(.+gradlew)$" "\\1.bat" GRADLEW_EXECUTABLE ${GRADLEW_EXECUTABLE}) +endif(CMAKE_HOST_WIN32) diff --git a/vendor/git.apache.org/thrift.git/build/cmake/config.h.in b/vendor/git.apache.org/thrift.git/build/cmake/config.h.in index 083bc55ec..c5d4d307d 100644 --- a/vendor/git.apache.org/thrift.git/build/cmake/config.h.in +++ b/vendor/git.apache.org/thrift.git/build/cmake/config.h.in @@ -91,6 +91,9 @@ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_NETINET_IN_H 1 +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SIGNAL_H 1 + /* Define to 1 if you have the header file. 
*/ #cmakedefine HAVE_STDINT_H 1 @@ -100,8 +103,8 @@ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_PTHREAD_H 1 -/* Define to 1 if you have the header file. */ -#cmakedefine HAVE_SYS_TIME_H 1 +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SYS_IOCTL_H 1 /* Define to 1 if you have the header file. */ #cmakedefine HAVE_SYS_PARAM_H 1 @@ -124,6 +127,9 @@ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_SYS_SELECT_H 1 +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SYS_TIME_H 1 + /* Define to 1 if you have the header file. */ #cmakedefine HAVE_SCHED_H 1 diff --git a/vendor/git.apache.org/thrift.git/build/docker/README.md b/vendor/git.apache.org/thrift.git/build/docker/README.md index a8d950157..cae4577b9 100644 --- a/vendor/git.apache.org/thrift.git/build/docker/README.md +++ b/vendor/git.apache.org/thrift.git/build/docker/README.md @@ -31,9 +31,8 @@ The Travis CI (continuous integration) builds use the Ubuntu Trusty, Xenial, and ### Ubuntu ### -* trusty (legacy) * xenial (stable) -* artful (latest) +* artful (current) ## Unsupported Containers ## @@ -115,54 +114,53 @@ Open a command prompt in the image: Last updated: October 1, 2017 -| Tool | ubuntu-trusty | ubuntu-xenial | ubuntu-artful | Notes | -| :-------- | :------------ | :------------ | :------------ | :---- | -| ant | 1.9.3 | 1.9.6 | 1.9.9 | | -| autoconf | 2.69 | 2.69 | 2.69 | | -| automake | 1.14.1 | 1.15 | 1.15 | | -| bison | 3.0.2 | 3.0.4 | 3.0.4 | | -| boost | 1.54.0 | 1.58.0 | 1.63.0 | artful: stock boost 1.62.0 has problems running unit tests | -| cmake | 3.2.2 | 3.5.1 | 3.9.1 | | -| cppcheck | 1.61 | 1.72 | 1.80 | | -| flex | 2.5.35 | 2.6.0 | 2.6.1 | | -| glibc | 2.19 | 2.23 | 2.26 | | -| libevent | 2.0.21 | 2.0.21 | 2.1 | | -| libstdc++ | 4.8.4 | 5.4.0 | 7.2.0 | | -| make | 3.81 | 4.1 | 4.1 | | -| openssl | 1.0.1f | 1.0.2g | 1.0.2g | | -| qt5 | 5.2.1 | 5.5.1 | 5.9.1 | | +| Tool | ubuntu-xenial | ubuntu-artful | Notes | +| :-------- 
| :------------ | :------------ | :---- | +| ant | 1.9.6 | 1.9.9 | | +| autoconf | 2.69 | 2.69 | | +| automake | 1.15 | 1.15 | | +| bison | 3.0.4 | 3.0.4 | | +| boost | 1.58.0 | 1.63.0 | artful: stock boost 1.62.0 has problems running unit tests | +| cmake | 3.5.1 | 3.9.1 | | +| cppcheck | 1.72 | 1.80 | | +| flex | 2.6.0 | 2.6.1 | | +| glibc | 2.23 | 2.26 | | +| libevent | 2.0.21 | 2.1 | | +| libstdc++ | 5.4.0 | 7.2.0 | | +| make | 4.1 | 4.1 | | +| openssl | 1.0.2g | 1.0.2g | | +| qt5 | 5.5.1 | 5.9.1 | | ## Compiler/Language Versions per Dockerfile ## -Last updated: October 1, 2017 - -| Language | ubuntu-trusty | ubuntu-xenial | ubuntu-artful | Notes | -| :-------- | :------------ | :------------ | :------------ | :---- | -| as3 | | | | Not in CI | -| C++ gcc | 4.8.4 | 5.4.0 | 7.2.0 | | -| C++ clang | 3.4 | 3.8 | 4.0 | | -| C# (mono) | 3.2.8.0 | 4.2.1 | 4.6.2.7 | | -| c_glib | 2.40.2 | 2.48.2 | 2.54.0 | | -| cocoa | | | | Not in CI | -| d | 2.070.2 | 2.073.2 | 2.076.0 | | -| dart | 1.20.1 | 1.24.2 | 1.24.2 | | -| delphi | | | | Not in CI | -| dotnet | | 2.0.3 | 2.0.3 | | -| erlang | R16B03 | 18.3 | 20.0.4 | | -| go | 1.2.1 | 1.6.2 | 1.8.3 | | -| haskell | 7.6.3 | 7.10.3 | 8.0.2 | | -| haxe | | 3.2.1 | 3.4.2 | disabled in trusty builds - cores on install v3.0.0, disabled in artful builds - see THRIFT-4352 | -| java | 1.7.0_151 | 1.8.0_131 | 1.8.0_151 | | -| js | | | | Unsure how to look for version info? 
| -| lua | 5.1.5 | 5.2.4 | 5.2.4 | Lua 5.3: see THRIFT-4386 | -| nodejs | | 4.2.6 | 8.9.1 | trusty has node.js 0.10.0 which is too old | -| ocaml | | 4.02.3 | 4.04.0 | | -| perl | 5.18.2 | 5.22.1 | 5.26.0 | | -| php | 5.5.9 | 7.0.22 | 7.1.8 | | -| python | 2.7.6 | 2.7.12 | 2.7.14 | | -| python3 | 3.4.3 | 3.5.2 | 3.6.3 | | -| ruby | 1.9.3p484 | 2.3.1p112 | 2.3.3p222 | | -| rust | 1.15.1 | 1.15.1 | 1.18.0 | | -| smalltalk | | | | Not in CI | -| swift | | | | Not in CI | - +| Language | ubuntu-xenial | ubuntu-artful | Notes | +| :-------- | :------------ | :------------ | :---- | +| as of | Mar 06, 2018 | Mar 19, 2018 | | +| as3 | | | Not in CI | +| C++ gcc | 5.4.0 | 7.2.0 | | +| C++ clang | 3.8 | 4.0 | | +| C# (mono) | 4.2.1.0 | 4.6.2.7 | | +| c_glib | 2.48.2 | 2.54.0 | | +| cl (sbcl) | | 1.4.5 | | +| cocoa | | | Not in CI | +| d | 2.073.2 | 2.077.1 | | +| dart | 1.22.1 | 1.24.3 | | +| delphi | | | Not in CI | +| dotnet | 2.1.4 | 2.1.4 | v2.1.4 SDK uses v2.0.5 Runtime | +| erlang | 18.3 | 20.0.4 | | +| go | 1.7.6 | 1.10 | | +| haskell | 7.10.3 | 8.0.2 | | +| haxe | 3.2.1 | 3.4.4 | THRIFT-4352: avoid 3.4.2 | +| java | 1.8.0_151 | 1.8.0_151 | | +| js | | | Unsure how to look for version info? 
| +| lua | 5.2.4 | 5.2.4 | Lua 5.3: see THRIFT-4386 | +| nodejs | 6.13.0 | 8.9.4 | | +| ocaml | | 4.04.0 | THRIFT-4517: ocaml 4.02.3 on xenial appears broken | +| perl | 5.22.1 | 5.26.0 | | +| php | 7.0.22 | 7.1.11 | | +| python | 2.7.12 | 2.7.14 | | +| python3 | 3.5.2 | 3.6.3 | | +| ruby | 2.3.1p112 | 2.3.3p222 | | +| rust | 1.17.0 | 1.21.0 | | +| smalltalk | | | Not in CI | +| swift | | | Not in CI | diff --git a/vendor/git.apache.org/thrift.git/build/docker/scripts/sca.sh b/vendor/git.apache.org/thrift.git/build/docker/scripts/sca.sh index 38803d4ae..f17f7035d 100755 --- a/vendor/git.apache.org/thrift.git/build/docker/scripts/sca.sh +++ b/vendor/git.apache.org/thrift.git/build/docker/scripts/sca.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -ev # @@ -49,6 +49,10 @@ flake8 --ignore=E501 test/test.py flake8 --ignore=E501,E722 test/crossrunner flake8 test/features +# PHP code style +composer install --quiet +./vendor/bin/phpcs + # TODO etc echo FIXMEs: `grep -r FIXME * | wc -l` echo HACKs: `grep -r HACK * | wc -l` diff --git a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-artful/Dockerfile b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-artful/Dockerfile index cba29020f..d712becaa 100644 --- a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-artful/Dockerfile +++ b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-artful/Dockerfile @@ -14,18 +14,22 @@ # Apache Thrift Docker build environment for Ubuntu Artful # Using all stock Ubuntu Artful packaging except for: # - cpp: stock boost 1.62 in artful has a nasty bug so we use stock boost 1.63 -# - d: does not come with Ubuntu so we're installing the latest -# - d: deimos for libevent and openssl omitted - not compatible / build errors -# - haxe: see THRIFT-4352, but test/haxe cores during testing -# and hxcpp 3.4.64 is not compatible with artful +# - d: dmd does not come with Ubuntu +# - dart: does not come with Ubuntu +# - dotnet: does not come with Ubuntu +# - haxe: version 3.4.2 that comes 
with Ubuntu cores in our CI build +# - go: xenial comes with 1.8, we want the latest (supported) +# - nodejs: want v8, Ubuntu comes with v6 # FROM buildpack-deps:artful-scm MAINTAINER Apache Thrift ENV DEBIAN_FRONTEND noninteractive -RUN apt-get update && \ - apt-get dist-upgrade -y && \ +### Add apt repos + +RUN apt-get update && \ + apt-get dist-upgrade -y && \ apt-get install -y --no-install-recommends \ apt \ apt-transport-https \ @@ -39,15 +43,25 @@ RUN apt-get update && \ # RUN echo "deb http://download.mono-project.com/repo/debian xenial main" | tee /etc/apt/sources.list.d/mono.list && \ # apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A6A19B38D3D831EF -# dotnet (core) -RUN curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.gpg && \ - echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-artful-prod artful main" > /etc/apt/sources.list.d/dotnetdev.list +# Dart +RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ + curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > \ + /etc/apt/sources.list.d/dart_stable.list -# node.js (this step runs apt-get update internally) - if we ever want a later version -RUN curl -sL https://deb.nodesource.com/setup_8.x | bash - +# dotnet (netcore) +RUN curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.gpg && \ + echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-artful-prod artful main" > \ + /etc/apt/sources.list.d/dotnetdev.list + +# haxe (https://haxe.org/download/linux/) +RUN add-apt-repository ppa:haxe/releases -y + +# node.js +RUN curl -sL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add - && \ + echo "deb https://deb.nodesource.com/node_8.x artful main" | tee /etc/apt/sources.list.d/nodesource.list ### install general dependencies -RUN apt-get install -y 
--no-install-recommends \ +RUN apt-get update && apt-get install -y --no-install-recommends \ `# General dependencies` \ bash-completion \ bison \ @@ -78,49 +92,42 @@ RUN apt-get install -y --no-install-recommends \ `# csharp (mono) dependencies` \ mono-devel -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && \ - wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \ - wget -qO - https://dlang.org/d-keyring.gpg | apt-key add - && \ - apt-get update && \ - apt-get install -y --no-install-recommends \ - `# D dependencies` \ - dmd-bin \ - libphobos2-dev \ - dub \ - dfmt \ - dscanner \ - libevent-dev \ - libssl-dev \ - xdg-utils -# libevent deimos doesn't seem to work so not enabling it: -# RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ -# curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ -# mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ -# mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \ -# rm -rf libevent-master -# openssl deimos doesn't work with openssl-1.0.2 so not enabling it: -# RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ -# mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ -# mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ -# rm -rf openssl-master +ENV SBCL_VERSION 1.4.5 +RUN \ +`# Common Lisp (sbcl) dependencies` \ + curl --version && \ + curl -O -J -L https://kent.dl.sourceforge.net/project/sbcl/sbcl/${SBCL_VERSION}/sbcl-${SBCL_VERSION}-x86-64-linux-binary.tar.bz2 && \ + tar xjf sbcl-${SBCL_VERSION}-x86-64-linux-binary.tar.bz2 && \ + cd sbcl-${SBCL_VERSION}-x86-64-linux && \ + ./install.sh && \ + sbcl --version && \ + rm -rf sbcl* -# dart cannot be downloaded by aptitude because of -# https://github.com/dart-lang/sdk/issues/30512 -# RUN curl 
https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ -# curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \ -# apt-get update && \ -# apt-get install -y --no-install-recommends \ -# `# Dart dependencies` \ -# dart -# so instead we do: -RUN wget https://storage.googleapis.com/dart-archive/channels/stable/release/latest/linux_packages/dart_1.24.2-1_amd64.deb && \ - dpkg -i dart_1.24.2-1_amd64.deb && \ - rm dart_1.24.2-1_amd64.deb +ENV D_VERSION 2.077.1 +ENV DMD_DEB dmd_2.077.1-0_amd64.deb +RUN \ +`# D dependencies` \ + wget -q http://downloads.dlang.org/releases/2.x/${D_VERSION}/${DMD_DEB} && \ + dpkg --install ${DMD_DEB} && \ + rm -f ${DMD_DEB} && \ + mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ + curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ + mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ + mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \ + rm -rf libevent-master && \ + curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ + mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ + mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ + rm -rf openssl-master + +RUN apt-get install -y --no-install-recommends \ + `# Dart dependencies` \ + dart/stable ENV PATH /usr/lib/dart/bin:$PATH RUN apt-get install -y --no-install-recommends \ `# dotnet core dependencies` \ - dotnet-sdk-2.0.3 + dotnet-sdk-2.1.4 RUN apt-get install -y --no-install-recommends \ `# Erlang dependencies` \ @@ -134,24 +141,28 @@ RUN apt-get install -y --no-install-recommends \ `# GlibC dependencies` \ libglib2.0-dev -RUN apt-get install -y --no-install-recommends \ -`# golang (go) dependencies` \ - golang-go \ - golang-race-detector-runtime +# golang +ENV GOLANG_VERSION 1.10 +ENV GOLANG_DOWNLOAD_URL 
https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz +ENV GOLANG_DOWNLOAD_SHA256 b5a64335f1490277b585832d1f6c7f8c6c11206cba5cd3f771dcb87b98ad1a33 +RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz && \ + echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - && \ + tar -C /usr/local -xzf golang.tar.gz && \ + ln -s /usr/local/go/bin/go /usr/local/bin && \ + rm golang.tar.gz RUN apt-get install -y --no-install-recommends \ `# Haskell dependencies` \ ghc \ cabal-install -# see THRIFT-4352, test/haxe cores on artful -# RUN apt-get install -y --no-install-recommends \ -# `# Haxe dependencies` \ -# haxe \ -# neko \ -# neko-dev -# RUN haxelib setup --always /usr/share/haxe/lib && \ -# haxelib install --always hxcpp +RUN apt-get install -y --no-install-recommends \ +`# Haxe dependencies` \ + haxe \ + neko \ + neko-dev && \ + haxelib setup --always /usr/share/haxe/lib && \ + haxelib install --always hxcpp 2>&1 > /dev/null RUN apt-get install -y --no-install-recommends \ `# Java dependencies` \ @@ -194,7 +205,7 @@ RUN apt-get install -y --no-install-recommends \ php-dev \ php-pear \ re2c \ - phpunit + composer RUN apt-get install -y --no-install-recommends \ `# Python dependencies` \ @@ -229,7 +240,6 @@ RUN apt-get install -y --no-install-recommends \ ruby \ ruby-dev \ ruby-bundler -RUN gem install bundler --no-ri --no-rdoc RUN apt-get install -y --no-install-recommends \ `# Rust dependencies` \ diff --git a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile index db2041a1f..a8e4d3baa 100644 --- a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile +++ b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile @@ -25,8 +25,10 @@ FROM buildpack-deps:trusty-scm MAINTAINER Apache Thrift ENV DEBIAN_FRONTEND noninteractive -RUN apt-get update && \ - apt-get dist-upgrade -y && \ +### Add apt repos + +RUN apt-get update && \ + apt-get 
dist-upgrade -y && \ apt-get install -y --no-install-recommends \ apt \ apt-transport-https \ @@ -36,6 +38,16 @@ RUN apt-get update && \ software-properties-common \ wget +# D +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && \ + wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \ + wget -qO - https://dlang.org/d-keyring.gpg | apt-key add - + +# Dart +RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ + curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > \ + /etc/apt/sources.list.d/dart_stable.list + RUN apt-get update && apt-get install -y --no-install-recommends \ `# General dependencies` \ bash-completion \ @@ -66,11 +78,7 @@ RUN apt-get install -y --no-install-recommends \ `# csharp (mono) dependencies` \ mono-devel -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && \ - wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \ - wget -qO - https://dlang.org/d-keyring.gpg | apt-key add - && \ - apt-get update && \ - apt-get install -y --no-install-recommends \ +RUN apt-get install -y --no-install-recommends \ `# D dependencies` \ dmd-bin=2.070.2-0 \ libphobos2-dev=2.070.2-0 \ @@ -78,20 +86,17 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && dfmt \ dscanner \ xdg-utils -# RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ -# curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ -# mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ -# mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \ -# rm -rf libevent-master -# RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ -# mv openssl-master/deimos/* 
/usr/include/dmd/druntime/import/deimos/ && \ -# mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ -# rm -rf openssl-master +RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ + curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ + mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ + mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \ + rm -rf libevent-master +RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ + mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ + mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ + rm -rf openssl-master -RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ - curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \ - apt-get update && \ - apt-get install -y --no-install-recommends \ +RUN apt-get install -y --no-install-recommends \ `# Dart dependencies` \ dart=1.20.1-1 ENV PATH /usr/lib/dart/bin:$PATH @@ -108,9 +113,15 @@ RUN apt-get install -y --no-install-recommends \ `# GlibC dependencies` \ libglib2.0-dev -RUN apt-get install -y --no-install-recommends \ -`# golang (go) dependencies` \ - golang-go +# golang +ENV GOLANG_VERSION 1.7.6 +ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz +ENV GOLANG_DOWNLOAD_SHA256 ad5808bf42b014c22dd7646458f631385003049ded0bb6af2efc7f1f79fa29ea +RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz && \ + echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - && \ + tar -C /usr/local -xzf golang.tar.gz && \ + ln -s /usr/local/go/bin/go /usr/local/bin && \ + rm golang.tar.gz RUN apt-get install -y --no-install-recommends \ `# Haskell dependencies` \ @@ -166,8 +177,8 @@ RUN apt-get install -y --no-install-recommends \ php5-cli \ php5-dev \ 
php-pear \ - re2c \ - phpunit + re2c && \ + wget https://getcomposer.org/installer -O - -q | php -- --quiet --install-dir=/usr/local/bin/ --filename=composer RUN apt-get install -y --no-install-recommends \ `# Python dependencies` \ @@ -189,7 +200,7 @@ RUN apt-get install -y --no-install-recommends \ python3-wheel \ python3-zope.interface && \ pip install -U ipaddress backports.ssl_match_hostname tornado && \ - pip3 install -U backports.ssl_match_hostname tornado + pip3 install -U backports.ssl_match_hostname tornado # installing tornado by pip/pip3 instead of debian package # if we install the debian package, the build fails in py2 @@ -211,6 +222,16 @@ RUN apt-get install -y --no-install-recommends \ sloccount && \ pip install flake8 +# Install BouncyCastle provider to fix Java builds issues with JDK 7 +# Builds accessing repote repositories fail as seen here: https://github.com/travis-ci/travis-ci/issues/8503 +RUN apt-get install -y --no-install-recommends \ +`# BouncyCastle JCE Provider dependencies` \ + libbcprov-java && \ + ln -s /usr/share/java/bcprov.jar /usr/lib/jvm/java-7-openjdk-amd64/jre/lib/ext/bcprov.jar && \ + awk -F . -v OFS=. 
'BEGIN{n=2}/^security\.provider/ {split($3, posAndEquals, "=");$3=n++"="posAndEquals[2];print;next} 1' /etc/java-7-openjdk/security/java.security > /tmp/java.security && \ + echo "security.provider.1=org.bouncycastle.jce.provider.BouncyCastleProvider" >> /tmp/java.security && \ + mv /tmp/java.security /etc/java-7-openjdk/security/java.security + # Clean up RUN rm -rf /var/cache/apt/* && \ rm -rf /var/lib/apt/lists/* && \ diff --git a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile.orig b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile.orig deleted file mode 100644 index 857384b41..000000000 --- a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-trusty/Dockerfile.orig +++ /dev/null @@ -1,231 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Apache Thrift Docker build environment for Ubuntu -# -# Known missing client libraries: -# - dotnetcore - -FROM buildpack-deps:trusty-scm -MAINTAINER Apache Thrift - -ENV DEBIAN_FRONTEND noninteractive - -# Add apt sources -# CMAKE -RUN apt-get update && \ - apt-get install -y --no-install-recommends software-properties-common && \ - add-apt-repository -y ppa:george-edison55/cmake-3.x - -# Erlang -RUN echo 'deb http://packages.erlang-solutions.com/debian trusty contrib' > /etc/apt/sources.list.d/erlang_solutions.list && \ - curl -sSL https://packages.erlang-solutions.com/debian/erlang_solutions.asc | apt-key add - - -# Dart -RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ - curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \ - sed -i /etc/apt/sources.list.d/dart_stable.list -e 's/https:/http:/g' - -# Consider using mirror nearby when building locally -# TODO: Provide option via --build-arg=... 
-# RUN sed -i /etc/apt/sources.list -e 's!http://archive.ubuntu.com/ubuntu/!http://your/mirror/!g' - -RUN apt-get update && apt-get install -y --no-install-recommends \ -`# General dependencies` \ - bison \ - build-essential \ - clang \ - cmake \ - debhelper \ - flex \ - ninja-build \ - pkg-config \ -`# Included in buildpack-deps` \ -`# autoconf` \ -`# automake` \ -`# g++` \ -`# git` \ -`# libtool` \ -`# make` - -RUN apt-get install -y --no-install-recommends \ -`# C++ dependencies` \ -`# libevent and OpenSSL are needed by D too` \ - libboost-dev \ - libboost-filesystem-dev \ - libboost-program-options-dev \ - libboost-system-dev \ - libboost-test-dev \ - libboost-thread-dev \ - libevent-dev \ - libssl-dev \ - qt5-default \ - qtbase5-dev \ - qtbase5-dev-tools - -RUN apt-get install -y --no-install-recommends \ -`# Java dependencies` \ - ant \ - ant-optional \ - openjdk-7-jdk \ - maven - -RUN apt-get install -y --no-install-recommends \ -`# Python dependencies` \ -`# TODO:` \ -`# Install twisted and zope.interface via pip. 
we need twisted at ./configure time, otherwise` \ -`# py.twisted tests are skipped.` \ - python-all \ - python-all-dbg \ - python-all-dev \ - python-pip \ - python-setuptools \ - python-twisted \ - python-zope.interface \ - python3-all \ - python3-all-dbg \ - python3-all-dev \ - python3-setuptools \ - python3-pip - -RUN apt-get install -y --no-install-recommends \ -`# Ruby dependencies` \ - ruby \ - ruby-dev \ - ruby-bundler \ -`# Perl dependencies` \ - libbit-vector-perl \ - libclass-accessor-class-perl \ - libcrypt-ssleay-perl \ - libio-socket-ssl-perl \ - libnet-ssleay-perl - -RUN apt-get install -y --no-install-recommends \ -`# Php dependencies` \ - php5 \ - php5-dev \ - php5-cli \ - php-pear \ - re2c \ - phpunit \ -`# GlibC dependencies` \ - libglib2.0-dev - -RUN apt-get update && apt-get install -y --no-install-recommends \ -`# Erlang dependencies` \ - erlang-base \ - erlang-eunit \ - erlang-dev \ - erlang-tools \ - rebar - -RUN apt-get install -y --no-install-recommends \ -`# Haskell dependencies` \ - ghc \ - cabal-install \ -`# Haxe dependencies` \ - neko \ - neko-dev \ - libneko0 - -# Newer release of nodejs -RUN curl -sL https://deb.nodesource.com/setup_4.x | bash -RUN apt-get install -y --no-install-recommends \ -`# Node.js dependencies` \ - nodejs - -# Add mono package repository url to get latest version of mono -RUN echo "deb http://download.mono-project.com/repo/debian trusty main" | tee /etc/apt/sources.list.d/mono.list -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A6A19B38D3D831EF -RUN apt-get update && apt-get install -y --no-install-recommends \ -`# CSharp dependencies` \ - mono-devel - -RUN apt-get install -y --no-install-recommends \ -`# D dependencies` \ - xdg-utils \ -`# Dart dependencies` \ - dart \ -`# Lua dependencies` \ - lua5.2 \ - lua5.2-dev \ -`# MinGW dependencies` \ - mingw32 \ - mingw32-binutils \ - mingw32-runtime \ - nsis \ -`# Clean up` \ - && rm -rf /var/cache/apt/* && \ - rm -rf /var/lib/apt/lists/* && \ - rm 
-rf /tmp/* && \ - rm -rf /var/tmp/* - -# Ruby -RUN gem install bundler --no-ri --no-rdoc - -# Python optional dependencies -RUN pip2 install -U ipaddress backports.ssl_match_hostname tornado -RUN pip3 install -U backports.ssl_match_hostname tornado - -# Go -RUN curl -sSL https://storage.googleapis.com/golang/go1.4.3.linux-amd64.tar.gz | tar -C /usr/local/ -xz -ENV PATH /usr/local/go/bin:$PATH - -# Haxe -RUN mkdir -p /usr/lib/haxe && \ - wget -O - https://github.com/HaxeFoundation/haxe/releases/download/3.2.1/haxe-3.2.1-linux64.tar.gz | \ - tar -C /usr/lib/haxe --strip-components=1 -xz && \ - ln -s /usr/lib/haxe/haxe /usr/bin/haxe && \ - ln -s /usr/lib/haxe/haxelib /usr/bin/haxelib && \ - mkdir -p /usr/lib/haxe/lib && \ - chmod -R 777 /usr/lib/haxe/lib && \ - haxelib setup --always /usr/lib/haxe/lib && \ - haxelib install --always hxcpp 3.4.64 - -# Node.js -# temporarily removed since this breaks the build (and is not needed to test C# code) -# RUN curl -sSL https://www.npmjs.com/install.sh | sh - -# D -RUN curl -sSL http://downloads.dlang.org/releases/2.x/2.070.0/dmd_2.070.0-0_amd64.deb -o /tmp/dmd_2.070.0-0_amd64.deb && \ - dpkg -i /tmp/dmd_2.070.0-0_amd64.deb && \ - rm /tmp/dmd_2.070.0-0_amd64.deb && \ - curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ - curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ - mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ - mv libevent-master/deimos/* openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ - mv libevent-master/C/* openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ - rm -rf libevent-master openssl-master && \ - echo 'gcc -Wl,--no-as-needed $*' > /usr/local/bin/gcc-dmd && \ - chmod 755 /usr/local/bin/gcc-dmd && \ - echo 'CC=/usr/local/bin/gcc-dmd' >> /etc/dmd.conf - -# Dart -ENV PATH /usr/lib/dart/bin:$PATH - -# OCaml -RUN echo 'deb 
http://ppa.launchpad.net/avsm/ppa/ubuntu trusty main' > /etc/apt/sources.list.d/avsm-official-ocaml.list && \ - gpg --keyserver keyserver.ubuntu.com --recv 61707B09 && \ - gpg --export --armor 61707B09 | apt-key add - && \ - apt-get update && \ - apt-get install -y ocaml opam && \ - opam init && \ - opam install oasis - -# Rust -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.17.0 -ENV PATH /root/.cargo/bin:$PATH - -ENV THRIFT_ROOT /thrift -RUN mkdir -p $THRIFT_ROOT/src -COPY Dockerfile $THRIFT_ROOT/ -WORKDIR $THRIFT_ROOT/src diff --git a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-xenial/Dockerfile b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-xenial/Dockerfile index c640bd931..760c50175 100644 --- a/vendor/git.apache.org/thrift.git/build/docker/ubuntu-xenial/Dockerfile +++ b/vendor/git.apache.org/thrift.git/build/docker/ubuntu-xenial/Dockerfile @@ -15,45 +15,53 @@ # Using all stock Ubuntu Xenial packaging except for: # - d: does not come with Ubuntu so we're installing 2.073.2 for coverage # - dart: does not come with Ubuntu so we're installing 1.22.1 for coverage +# - dotnet: does not come with Ubuntu +# - go: Xenial comes with 1.6, but we need 1.7 or later +# - nodejs: Xenial comes with 4.2.6 which exits LTS April 2018, so we're installing 6.x +# - ocaml: causes stack overflow error, just started March 2018 not sure why # -# -# Known missing or disabled libraries: -# - d: deimos for libevent and openssl omitted - not compatible / build errors - FROM buildpack-deps:xenial-scm MAINTAINER Apache Thrift ENV DEBIAN_FRONTEND noninteractive ### Add apt repos -RUN apt-get update && apt-get dist-upgrade -y && apt-get install -y --no-install-recommends apt apt-transport-https curl wget apt-utils +RUN apt-get update && \ + apt-get dist-upgrade -y && \ + apt-get install -y --no-install-recommends \ + apt \ + apt-transport-https \ + apt-utils \ + curl \ + software-properties-common \ + wget # csharp (mono) # RUN echo "deb 
http://download.mono-project.com/repo/debian xenial main" | tee /etc/apt/sources.list.d/mono.list && \ # apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A6A19B38D3D831EF # D -RUN wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \ +RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EBCF975E5BA24D5E && \ + wget http://master.dl.sourceforge.net/project/d-apt/files/d-apt.list -O /etc/apt/sources.list.d/d-apt.list && \ wget -qO - https://dlang.org/d-keyring.gpg | apt-key add - ENV D_VERSION 2.073.2-0 # Dart RUN curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ - curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list && \ - sed -i /etc/apt/sources.list.d/dart_stable.list -e 's/https:/http:/g' -# since ubuntu-artful can't run dart, we'll run 1.240 on xenial for now -ENV DART_VERSION 1.24.2-1 + curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > \ + /etc/apt/sources.list.d/dart_stable.list +ENV DART_VERSION 1.22.1-1 # dotnet (core) RUN curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /etc/apt/trusted.gpg.d/microsoft.gpg && \ - echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-xenial-prod xenial main" > /etc/apt/sources.list.d/dotnetdev.list - -# node.js (this step runs apt-get update internally) -# note: node 8.5 introduced some issues with directory handling / jsdoc / something... 
using 7.x for now -# RUN curl -sL https://deb.nodesource.com/setup_7.x | bash - + echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-xenial-prod xenial main" > \ + /etc/apt/sources.list.d/dotnetdev.list +# node.js +RUN curl -sL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add - && \ + echo "deb https://deb.nodesource.com/node_6.x xenial main" | tee /etc/apt/sources.list.d/nodesource.list + ### install general dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ `# General dependencies` \ @@ -102,17 +110,15 @@ RUN apt-get install -y --allow-unauthenticated --no-install-recommends \ libevent-dev \ libssl-dev \ xdg-utils -# libevent deimos doesn't seem to work so not enabling it: -# RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ -# curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ -# mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ -# mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \ -# rm -rf libevent-master -# openssl deimos doesn't work with openssl-1.0.2 so not enabling it: -# RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ -# mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ -# mv openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ -# rm -rf openssl-master +RUN mkdir -p /usr/include/dmd/druntime/import/deimos /usr/include/dmd/druntime/import/C && \ + curl -sSL https://github.com/D-Programming-Deimos/libevent/archive/master.tar.gz| tar xz && \ + mv libevent-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ + mv libevent-master/C/* /usr/include/dmd/druntime/import/C/ && \ + rm -rf libevent-master +RUN curl -sSL https://github.com/D-Programming-Deimos/openssl/archive/master.tar.gz| tar xz && \ + mv openssl-master/deimos/* /usr/include/dmd/druntime/import/deimos/ && \ + mv 
openssl-master/C/* /usr/include/dmd/druntime/import/C/ && \ + rm -rf openssl-master RUN apt-get install -y --no-install-recommends \ `# Dart dependencies` \ @@ -121,7 +127,7 @@ ENV PATH /usr/lib/dart/bin:$PATH RUN apt-get install -y --no-install-recommends \ `# dotnet core dependencies` \ - dotnet-sdk-2.0.3 + dotnet-sdk-2.1.4 RUN apt-get install -y --no-install-recommends \ `# Erlang dependencies` \ @@ -135,10 +141,15 @@ RUN apt-get install -y --no-install-recommends \ `# GlibC dependencies` \ libglib2.0-dev -RUN apt-get install -y --no-install-recommends \ -`# golang (go) dependencies` \ - golang-go \ - golang-race-detector-runtime +# golang +ENV GOLANG_VERSION 1.7.6 +ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz +ENV GOLANG_DOWNLOAD_SHA256 ad5808bf42b014c22dd7646458f631385003049ded0bb6af2efc7f1f79fa29ea +RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz && \ + echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - && \ + tar -C /usr/local -xzf golang.tar.gz && \ + ln -s /usr/local/go/bin/go /usr/local/bin && \ + rm golang.tar.gz RUN apt-get install -y --no-install-recommends \ `# Haskell dependencies` \ @@ -150,9 +161,9 @@ RUN apt-get install -y --no-install-recommends \ haxe \ neko \ neko-dev \ - libneko0 -RUN haxelib setup --always /usr/share/haxe/lib && \ - haxelib install --always hxcpp 3.4.64 + libneko0 && \ + haxelib setup --always /usr/share/haxe/lib && \ + haxelib install --always hxcpp 3.4.64 2>&1 > /dev/null # note: hxcpp 3.4.185 (latest) no longer ships static libraries, and caused a build failure RUN apt-get install -y --no-install-recommends \ @@ -171,16 +182,15 @@ RUN apt-get install -y --no-install-recommends \ RUN apt-get install -y --no-install-recommends \ `# Node.js dependencies` \ - nodejs \ - npm && \ - ln -s /usr/bin/nodejs /usr/bin/node + nodejs -RUN apt-get install -y --no-install-recommends \ -`# OCaml dependencies` \ - ocaml \ - opam && \ - opam init --yes && \ - opam install --yes oasis 
+# THRIFT-4517: causes stack overflows; version too old; skip ocaml in xenial +# RUN apt-get install -y --no-install-recommends \ +# `# OCaml dependencies` \ +# ocaml \ +# opam && \ +# opam init --yes && \ +# opam install --yes oasis RUN apt-get install -y --no-install-recommends \ `# Perl dependencies` \ @@ -197,7 +207,7 @@ RUN apt-get install -y --no-install-recommends \ php7.0-dev \ php-pear \ re2c \ - phpunit + composer RUN apt-get install -y --no-install-recommends \ `# Python dependencies` \ @@ -229,7 +239,6 @@ RUN apt-get install -y --no-install-recommends \ ruby \ ruby-dev \ ruby-bundler -RUN gem install bundler --no-ri --no-rdoc RUN apt-get install -y --no-install-recommends \ `# Rust dependencies` \ diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/Makefile.am b/vendor/git.apache.org/thrift.git/compiler/cpp/Makefile.am index 482a89a4d..0b8ef2e7e 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/Makefile.am +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/Makefile.am @@ -71,7 +71,7 @@ compiler_core = src/thrift/common.h \ src/thrift/parse/parse.cc \ src/thrift/generate/t_generator.h \ src/thrift/generate/t_oop_generator.h \ - src/thrift/generate/t_html_generator.h + src/thrift/generate/t_html_generator.h thrift_SOURCES = src/thrift/main.h \ src/thrift/main.cc \ @@ -88,6 +88,7 @@ thrift_SOURCES += src/thrift/generate/t_c_glib_generator.cc \ src/thrift/generate/t_haxe_generator.cc \ src/thrift/generate/t_csharp_generator.cc \ src/thrift/generate/t_netcore_generator.cc \ + src/thrift/generate/t_netcore_generator.h \ src/thrift/generate/t_py_generator.cc \ src/thrift/generate/t_rb_generator.cc \ src/thrift/generate/t_perl_generator.cc \ @@ -108,7 +109,8 @@ thrift_SOURCES += src/thrift/generate/t_c_glib_generator.cc \ src/thrift/generate/t_gv_generator.cc \ src/thrift/generate/t_d_generator.cc \ src/thrift/generate/t_lua_generator.cc \ - src/thrift/generate/t_rs_generator.cc + src/thrift/generate/t_rs_generator.cc \ + 
src/thrift/generate/t_cl_generator.cc thrift_CPPFLAGS = -I$(srcdir)/src thrift_CXXFLAGS = -Wall -Wextra -pedantic -Werror @@ -153,7 +155,7 @@ include_generatedir = $(include_thriftdir)/generate include_generate_HEADERS = src/thrift/generate/t_generator.h \ src/thrift/generate/t_generator_registry.h \ src/thrift/generate/t_oop_generator.h \ - src/thrift/generate/t_html_generator.h + src/thrift/generate/t_html_generator.h include_parsedir = $(include_thriftdir)/parse include_parse_HEADERS = src/thrift/parse/t_service.h \ diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/README.md b/vendor/git.apache.org/thrift.git/compiler/cpp/README.md index 2fff0d846..32eac9fbc 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/README.md +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/README.md @@ -1,19 +1,49 @@ -# Build compiler using CMake +# Build Thrift IDL compiler using CMake -## build on Unix-like System + -### build using cmake +- [Build Thrift IDL compiler using CMake](#build-thrift-idl-compiler-using-cmake) + - [Build on Unix-like System](#build-on-unix-like-system) + - [Prerequisites](#prerequisites) + - [Build using CMake](#build-using-cmake) + - [Build with Eclipse IDE](#build-with-eclipse-ide) + - [Build with XCode IDE in MacOS](#build-with-xcode-ide-in-macos) + - [Usage of other IDEs](#usage-of-other-ides) + - [Build on Windows](#build-on-windows) + - [Prerequisites](#prerequisites-1) + - [Build using Git Bash](#build-using-git-bash) + - [Using Visual Studio and Win flex-bison](#using-visual-studio-and-win-flex-bison) + - [Cross compile using mingw32 and generate a Windows Installer with CPack](#cross-compile-using-mingw32-and-generate-a-windows-installer-with-cpack) +- [Other cases](#other-cases) + - [Building the Thrift IDL compiler in Windows without CMake](#building-the-thrift-idl-compiler-in-windows-without-cmake) +- [Unit tests for compiler](#unit-tests-for-compiler) + - [Using boost test](#using-boost-test) + - [Using Catch C++ test 
library](#using-catch-c-test-library) +- [Have a Happy free time and holidays](#have-a-happy-free-time-and-holidays) -Use the following steps to build using cmake: + + +## Build on Unix-like System + +### Prerequisites +- Install CMake +- Install flex and bison + +### Build using CMake + +- Go to **thrift\compiler\cpp** +- Use the following steps to build using cmake: ``` -mkdir cmake-build -cd cmake-build +mkdir cmake-build && cd cmake-build cmake .. make ``` -### Create an eclipse project +#### Build with Eclipse IDE + +- Go to **thrift\compiler\cpp** +- Use the following steps to build using cmake: ``` mkdir cmake-ec && cd cmake-ec @@ -23,8 +53,65 @@ make Now open the folder cmake-ec using eclipse. +#### Build with XCode IDE in MacOS -## Cross compile using mingw32 and generate a Windows Installer with CPack +- Install/update flex, bison and cmake with brew + +``` +brew install cmake +brew install bison +``` + +- Go to **thrift\compiler\cpp** +- Run commands in command line: + +``` +mkdir cmake-build && cd cmake-build +cmake -G "Xcode" -DWITH_PLUGIN=OFF .. +cmake --build . +``` + +#### Usage of other IDEs + +Please check list of supported IDE + +``` +cmake --help +``` + +## Build on Windows + +### Prerequisites +- Install CMake - https://cmake.org/download/ +- In case if you want to build without Git Bash - install winflexbison - https://sourceforge.net/projects/winflexbison/ +- In case if you want to build with Visual Studio - install Visual Studio + - Better to use the latest stable Visual Studio Community Edition - https://www.visualstudio.com/vs/whatsnew/ (ensure that you installed workload "Desktop Development with C++" for VS2017) - Microsoft added some support for CMake and improving it in Visual Studio + +### Build using Git Bash + +Git Bash provides flex and bison + +- Go to **thrift\compiler\cpp** +- Use the following steps to build using cmake: + +``` +mkdir cmake-vs && cd cmake-vs +cmake -DWITH_SHARED_LIB=off .. +cmake --build . 
+``` + +### Using Visual Studio and Win flex-bison + +- Generate a Visual Studio project for version of Visual Studio which you have (**cmake --help** can show list of supportable VS versions): +- Run commands in command line: +``` +mkdir cmake-vs +cd cmake-vs +cmake -G "Visual Studio 15 2017" -DWITH_PLUGIN=OFF .. +``` +- Now open the folder cmake-vs using Visual Studio. + +### Cross compile using mingw32 and generate a Windows Installer with CPack ``` mkdir cmake-mingw32 && cd cmake-mingw32 @@ -32,49 +119,23 @@ cmake -DCMAKE_TOOLCHAIN_FILE=../build/cmake/mingw32-toolchain.cmake -DBUILD_COMP cpack ``` -# Build on windows +# Other cases -### using Git Bash +## Building the Thrift IDL compiler in Windows without CMake -Git Bash provides flex and bison, so you just need to do this: +If you don't want to use CMake you can use the already available Visual Studio 2010 solution. -``` -mkdir cmake-vs && cd cmake-vs -cmake -DWITH_SHARED_LIB=off .. -``` +The Visual Studio project contains pre-build commands to generate the thriftl.cc, thrifty.cc and thrifty.hh files which are necessary to build the compiler. -### using Win flex-bison +These depend on bison, flex and their dependencies to work properly. -In order to build on windows with winflexbison a few additional steps are necessary: +Download flex & bison as described above. -1. Download winflexbison from http://sourceforge.net/projects/winflexbison/ -2. Extract the winflex bison files to for e.g. C:\winflexbison -3. Make the CMake variables point to the correct binaries. - * FLEX_EXECUTABLE = C:/winbuild/win_flex.exe - * BISON_EXECUTABLE = C:/winbuild/win_bison.exe -4. Generate a Visual Studio project: -``` -mkdir cmake-vs && cd cmake-vs -cmake -G "Visual Studio 12" -DWITH_SHARED_LIB=off .. -``` -5. Now open the folder build_vs using Visual Studio 2013. - -# Building the Thrift IDL compiler in Windows - -If you don't want to use CMake you can use the already available Visual Studio -2010 solution. 
-The Visual Studio project contains pre-build commands to generate the -thriftl.cc, thrifty.cc and thrifty.hh files which are necessary to build -the compiler. These depend on bison, flex and their dependencies to -work properly. -Download flex & bison as described above. -Place these binaries somewhere in the path and -rename win_flex.exe and win_bison.exe to flex.exe and bison.exe respectively. +Place these binaries somewhere in the path and rename win_flex.exe and win_bison.exe to flex.exe and bison.exe respectively. If this doesn't work on a system, try these manual pre-build steps. -Open compiler.sln and remove the Pre-build commands under the project's - Properties -> Build Events -> Pre-Build Events. +Open compiler.sln and remove the Pre-build commands under the project's: Properties -> Build Events -> Pre-Build Events. From a command prompt: ``` @@ -99,3 +160,16 @@ Download inttypes.h from the interwebs and place it in an include path location (e.g. thrift/compiler/cpp/src). Build the compiler in Visual Studio. 
+ +# Unit tests for compiler + +## Using boost test +- pls check **test** folder + +## Using Catch C++ test library + +Added generic way to cover code by tests for many languages (you just need to make a correct header file for generator for your language - example in **netcore** implementation) + +- pls check **tests** folder + +# Have a Happy free time and holidays \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/compiler.vcxproj b/vendor/git.apache.org/thrift.git/compiler/cpp/compiler.vcxproj index 4b03253e2..0628b54c8 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/compiler.vcxproj +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/compiler.vcxproj @@ -51,7 +51,7 @@ - + @@ -196,7 +196,6 @@ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy - @@ -210,6 +209,7 @@ WIN32;MINGW;YY_NO_UNISTD_H;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) thrift\windows\config.h CompileAsCpp + MultiThreaded Console @@ -219,7 +219,6 @@ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy - @@ -233,6 +232,7 @@ WIN32;MINGW;YY_NO_UNISTD_H;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) thrift\windows\config.h CompileAsCpp + MultiThreaded Console @@ -242,10 +242,9 @@ flex -o "src\\thrift\\thriftl.cc" src/thrift/thriftl.ll && bison -y -o "src\\thrift\\thrifty.cc" --defines="src\\thrift\\thrifty.hh" src/thrift/thrifty.yy - - + \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/audit/t_audit.cpp b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/audit/t_audit.cpp index 1386f3bd1..ef39d60c3 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/audit/t_audit.cpp +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/audit/t_audit.cpp @@ -202,8 +202,8 @@ bool 
compare_defaults(t_const_value* newStructDefault, t_const_value* oldStructD } case t_const_value::CV_MAP: { - const std::map newMap = newStructDefault->get_map(); - const std::map oldMap = oldStructDefault->get_map(); + const std::map newMap = newStructDefault->get_map(); + const std::map oldMap = oldStructDefault->get_map(); bool defaultValuesCompare = (oldMap.size() == newMap.size()); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_as3_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_as3_generator.cc index fc92de954..87089b44e 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_as3_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_as3_generator.cc @@ -471,8 +471,8 @@ void t_as3_generator::print_const_value(std::ofstream& out, } else if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; out << name << ":" << type_name(type) << " = new " << type_name(type, false, true) << "();" << endl; if (!in_static) { @@ -516,8 +516,8 @@ void t_as3_generator::print_const_value(std::ofstream& out, } t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); string val = render_const_value(out, name, vtype, v_iter->second); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc index a7beca757..3ae7854f9 100644 
--- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_c_glib_generator.cc @@ -997,8 +997,8 @@ void t_c_glib_generator::generate_const_initializer(string name, if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; ostringstream initializers; // initialize any constants that may be referenced by this initializer @@ -1171,8 +1171,8 @@ void t_c_glib_generator::generate_const_initializer(string name, } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; ostringstream initializers; ostringstream appenders; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cl_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cl_generator.cc new file mode 100644 index 000000000..d9266d11b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cl_generator.cc @@ -0,0 +1,558 @@ +/* + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "thrift/platform.h" +#include "t_oop_generator.h" +using namespace std; + + +/** + * Common Lisp code generator. + * + * @author Patrick Collison + */ +class t_cl_generator : public t_oop_generator { + public: + t_cl_generator( + t_program* program, + const std::map& parsed_options, + const std::string& option_string) + : t_oop_generator(program) + { + no_asd = false; + system_prefix = "thrift-gen-"; + + std::map::const_iterator iter; + + for(iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) { + if(iter->first.compare("no_asd") == 0) { + no_asd = true; + } else if (iter->first.compare("sys_pref") == 0) { + system_prefix = iter->second; + } else { + throw "unknown option cl:" + iter->first; + } + } + + out_dir_base_ = "gen-cl"; + copy_options_ = option_string; + } + + void init_generator(); + void close_generator(); + + void generate_typedef (t_typedef* ttypedef); + void generate_enum (t_enum* tenum); + void generate_const (t_const* tconst); + void generate_struct (t_struct* tstruct); + void generate_xception (t_struct* txception); + void generate_service (t_service* tservice); + void generate_cl_struct (std::ofstream& out, t_struct* tstruct, bool is_exception); + void generate_cl_struct_internal (std::ofstream& out, t_struct* tstruct, bool is_exception); + void generate_exception_sig(std::ofstream& out, t_function* f); + std::string render_const_value(t_type* type, t_const_value* value); + + 
std::string cl_autogen_comment(); + void asdf_def(std::ofstream &out); + void package_def(std::ofstream &out); + void package_in(std::ofstream &out); + std::string generated_package(); + std::string prefix(std::string name); + std::string package_of(t_program* program); + std::string package(); + std::string render_includes(); + + std::string type_name(t_type* ttype); + std::string typespec (t_type *t); + std::string function_signature(t_function* tfunction); + std::string argument_list(t_struct* tstruct); + + std::string cl_docstring(std::string raw); + + private: + + int temporary_var; + /** + * Isolate the variable definitions, as they can require structure definitions + */ + std::ofstream f_asd_; + std::ofstream f_types_; + std::ofstream f_vars_; + + std::string copy_options_; + + bool no_asd; + std::string system_prefix; +}; + + +void t_cl_generator::init_generator() { + MKDIR(get_out_dir().c_str()); + string program_dir = get_out_dir() + "/" + program_name_; + MKDIR(program_dir.c_str()); + + temporary_var = 0; + + string f_types_name = program_dir + "/" + program_name_ + "-types.lisp"; + string f_vars_name = program_dir + "/" + program_name_ + "-vars.lisp"; + + f_types_.open(f_types_name.c_str()); + f_types_ << cl_autogen_comment() << endl; + f_vars_.open(f_vars_name.c_str()); + f_vars_ << cl_autogen_comment() << endl; + + package_def(f_types_); + package_in(f_types_); + package_in(f_vars_); + + if (!no_asd) { + string f_asd_name = program_dir + "/" + system_prefix + program_name_ + ".asd"; + f_asd_.open(f_asd_name.c_str()); + f_asd_ << cl_autogen_comment() << endl; + asdf_def(f_asd_); + } +} + +/** + * Renders all the imports necessary for including another Thrift program + */ +string t_cl_generator::render_includes() { + const vector& includes = program_->get_includes(); + string result = ""; + result += ":depends-on (:thrift"; + for (size_t i = 0; i < includes.size(); ++i) { + result += " :" + system_prefix + underscore(includes[i]->get_name()); + } + 
result += ")\n"; + return result; +} + +string t_cl_generator::package_of(t_program* program) { + string prefix = program->get_namespace("cl"); + return prefix.empty() ? "thrift-generated" : prefix; +} + +string t_cl_generator::package() { + return package_of(program_); +} + +string t_cl_generator::prefix(string symbol) { + return "\"" + symbol + "\""; +} + +string t_cl_generator::cl_autogen_comment() { + return + std::string(";;; ") + "Autogenerated by Thrift\n" + + ";;; DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING\n" + + ";;; options string: " + copy_options_ + "\n"; +} + +string t_cl_generator::cl_docstring(string raw) { + replace(raw.begin(), raw.end(), '"', '\''); + return raw; +} + + +void t_cl_generator::close_generator() { + f_asd_.close(); + f_types_.close(); + f_vars_.close(); +} + +string t_cl_generator::generated_package() { + return program_->get_namespace("cpp"); +} + +void t_cl_generator::asdf_def(std::ofstream &out) { + out << "(asdf:defsystem #:" << system_prefix << program_name_ << endl; + indent_up(); + out << indent() << render_includes() + << indent() << ":serial t" << endl + << indent() << ":components (" + << "(:file \"" << program_name_ << "-types\") " + << "(:file \"" << program_name_ << "-vars\")))" << endl; + indent_down(); +} + +/*** + * Generate a package definition. Add use references equivalent to the idl file's include statements. + */ +void t_cl_generator::package_def(std::ofstream &out) { + const vector& includes = program_->get_includes(); + + out << "(thrift:def-package :" << package(); + if ( includes.size() > 0 ) { + out << " :use ("; + for (size_t i = 0; i < includes.size(); ++i) { + out << " :" << includes[i]->get_name(); + } + out << ")"; + } + out << ")" << endl << endl; +} + +void t_cl_generator::package_in(std::ofstream &out) { + out << "(cl:in-package :" << package() << ")" << endl << endl; +} + +/** + * Generates a typedef. This is not done in Common Lisp, types are all implicit. 
+ * + * @param ttypedef The type definition + */ +void t_cl_generator::generate_typedef(t_typedef* ttypedef) { + (void)ttypedef; +} + +void t_cl_generator::generate_enum(t_enum* tenum) { + f_types_ << "(thrift:def-enum " << prefix(tenum->get_name()) << endl; + + vector constants = tenum->get_constants(); + vector::iterator c_iter; + int value = -1; + + indent_up(); + f_types_ << indent() << "("; + for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) { + value = (*c_iter)->get_value(); + + if(c_iter != constants.begin()) f_types_ << endl << indent() << " "; + + f_types_ << "(\"" << (*c_iter)->get_name() << "\" . " << value << ")"; + } + indent_down(); + f_types_ << "))" << endl << endl; +} + +/** + * Generate a constant value + */ +void t_cl_generator::generate_const(t_const* tconst) { + t_type* type = tconst->get_type(); + string name = tconst->get_name(); + t_const_value* value = tconst->get_value(); + + f_vars_ << "(thrift:def-constant " << prefix(name) << " " << render_const_value(type, value) << ")" + << endl << endl; +} + +/** + * Prints the value of a constant with the given type. Note that type checking + * is NOT performed in this function as it is always run beforehand using the + * validate_types method in main.cc + */ +string t_cl_generator::render_const_value(t_type* type, t_const_value* value) { + type = get_true_type(type); + std::ostringstream out; + if (type->is_base_type()) { + t_base_type::t_base tbase = ((t_base_type*)type)->get_base(); + switch (tbase) { + case t_base_type::TYPE_STRING: + out << "\"" << value->get_string() << "\""; + break; + case t_base_type::TYPE_BOOL: + out << (value->get_integer() > 0 ? 
"t" : "nil"); + break; + case t_base_type::TYPE_I8: + case t_base_type::TYPE_I16: + case t_base_type::TYPE_I32: + case t_base_type::TYPE_I64: + out << value->get_integer(); + break; + case t_base_type::TYPE_DOUBLE: + if (value->get_type() == t_const_value::CV_INTEGER) { + out << value->get_integer(); + } else { + out << value->get_double(); + } + break; + default: + throw "compiler error: no const of base type " + t_base_type::t_base_name(tbase); + } + } else if (type->is_enum()) { + indent(out) << value->get_integer(); + } else if (type->is_struct() || type->is_xception()) { + out << (type->is_struct() ? "(make-instance '" : "(make-exception '") << + lowercase(type->get_name()) << " " << endl; + indent_up(); + + const vector& fields = ((t_struct*)type)->get_members(); + vector::const_iterator f_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; + + for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { + t_type* field_type = NULL; + for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { + if ((*f_iter)->get_name() == v_iter->first->get_string()) { + field_type = (*f_iter)->get_type(); + } + } + if (field_type == NULL) { + throw "type error: " + type->get_name() + " has no field " + v_iter->first->get_string(); + } + + out << indent() << ":" << v_iter->first->get_string() << " " << + render_const_value(field_type, v_iter->second) << endl; + } + out << indent() << ")"; + + indent_down(); + } else if (type->is_map()) { + // emit an hash form with both keys and values to be evaluated + t_type* ktype = ((t_map*)type)->get_key_type(); + t_type* vtype = ((t_map*)type)->get_val_type(); + out << "(thrift:map "; + indent_up(); + const map& val = value->get_map(); + map::const_iterator v_iter; + for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { + out << endl << indent() + << "(cl:cons " << render_const_value(ktype, v_iter->first) << " " + << render_const_value(vtype, v_iter->second) << ")"; + } + indent_down(); + out << 
indent() << ")"; + } else if (type->is_list() || type->is_set()) { + t_type* etype; + if (type->is_list()) { + etype = ((t_list*)type)->get_elem_type(); + } else { + etype = ((t_set*)type)->get_elem_type(); + } + if (type->is_set()) { + out << "(thrift:set" << endl; + } else { + out << "(thrift:list" << endl; + } + indent_up(); + indent_up(); + const vector& val = value->get_list(); + vector::const_iterator v_iter; + for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { + out << indent() << render_const_value(etype, *v_iter) << endl; + } + out << indent() << ")"; + indent_down(); + indent_down(); + } else { + throw "CANNOT GENERATE CONSTANT FOR TYPE: " + type->get_name(); + } + return out.str(); +} + +void t_cl_generator::generate_struct(t_struct* tstruct) { + generate_cl_struct(f_types_, tstruct, false); +} + +void t_cl_generator::generate_xception(t_struct* txception) { + generate_cl_struct(f_types_, txception, true); +} + +void t_cl_generator::generate_cl_struct_internal(std::ofstream& out, t_struct* tstruct, bool is_exception) { + (void)is_exception; + const vector& members = tstruct->get_members(); + vector::const_iterator m_iter; + + out << "("; + + for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) { + t_const_value* value = (*m_iter)->get_value(); + t_type* type = (*m_iter)->get_type(); + + if (m_iter != members.begin()) { + out << endl << indent() << " "; + } + out << "(" << prefix((*m_iter)->get_name()) << " " << + ( (NULL != value) ? 
render_const_value(type, value) : "nil" ) << + " :id " << (*m_iter)->get_key(); + if ( type->is_base_type() && "string" == typespec(type) ) + if ( ((t_base_type*)type)->is_binary() ) + out << " :type binary"; + else + out << " :type string"; + else + out << " :type " << typespec(type); + if ( (*m_iter)->get_req() == t_field::T_OPTIONAL ) { + out << " :optional t"; + } + if ( (*m_iter)->has_doc()) { + out << " :documentation \"" << cl_docstring((*m_iter)->get_doc()) << "\""; + } + out <<")"; + } + + out << ")"; +} + +void t_cl_generator::generate_cl_struct(std::ofstream& out, t_struct* tstruct, bool is_exception = false) { + std::string name = type_name(tstruct); + out << (is_exception ? "(thrift:def-exception " : "(thrift:def-struct ") << + prefix(name) << endl; + indent_up(); + if ( tstruct->has_doc() ) { + out << indent() ; + out << "\"" << cl_docstring(tstruct->get_doc()) << "\"" << endl; + } + out << indent() ; + generate_cl_struct_internal(out, tstruct, is_exception); + indent_down(); + out << ")" << endl << endl; +} + +void t_cl_generator::generate_exception_sig(std::ofstream& out, t_function* f) { + generate_cl_struct_internal(out, f->get_xceptions(), true); +} + +void t_cl_generator::generate_service(t_service* tservice) { + string extends_client; + vector functions = tservice->get_functions(); + vector::iterator f_iter; + + if (tservice->get_extends() != NULL) { + extends_client = type_name(tservice->get_extends()); + } + + extends_client = extends_client.empty() ? 
"nil" : prefix(extends_client); + + f_types_ << "(thrift:def-service " << prefix(service_name_) << " " + << extends_client; + + indent_up(); + + if ( tservice->has_doc()) { + f_types_ << endl << indent() + << "(:documentation \"" << cl_docstring(tservice->get_doc()) << "\")"; + } + + for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { + t_function* function = *f_iter; + string fname = function->get_name(); + string signature = function_signature(function); + t_struct* exceptions = function->get_xceptions(); + const vector& xmembers = exceptions->get_members(); + + f_types_ << endl << indent() << "(:method " << prefix(fname); + f_types_ << " (" << signature << " " << typespec((*f_iter)->get_returntype()) << ")"; + if (xmembers.size() > 0) { + f_types_ << endl << indent() << " :exceptions " ; + generate_exception_sig(f_types_, function); + } + if ( (*f_iter)->is_oneway() ) { + f_types_ << endl << indent() << " :oneway t"; + } + if ( (*f_iter)->has_doc() ) { + f_types_ << endl << indent() << " :documentation \"" + << cl_docstring((*f_iter)->get_doc()) << "\""; + } + f_types_ << ")"; + } + + f_types_ << ")" << endl << endl; + + indent_down(); +} + +string t_cl_generator::typespec(t_type *t) { + t = get_true_type(t); + + if (t -> is_binary()){ + return "binary"; + } else if (t->is_base_type()) { + return type_name(t); + } else if (t->is_map()) { + t_map *m = (t_map*) t; + return "(thrift:map " + typespec(m->get_key_type()) + " " + + typespec(m->get_val_type()) + ")"; + } else if (t->is_struct() || t->is_xception()) { + return "(struct " + prefix(type_name(t)) + ")"; + } else if (t->is_list()) { + return "(thrift:list " + typespec(((t_list*) t)->get_elem_type()) + ")"; + } else if (t->is_set()) { + return "(thrift:set " + typespec(((t_set*) t)->get_elem_type()) + ")"; + } else if (t->is_enum()) { + return "(enum \"" + ((t_enum*) t)->get_name() + "\")"; + } else { + throw "Sorry, I don't know how to generate this: " + type_name(t); + } +} + +string 
t_cl_generator::function_signature(t_function* tfunction) { + return argument_list(tfunction->get_arglist()); +} + +string t_cl_generator::argument_list(t_struct* tstruct) { + stringstream res; + res << "("; + + const vector& fields = tstruct->get_members(); + vector::const_iterator f_iter; + bool first = true; + for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { + if (first) { + first = false; + } else { + res << " "; + } + res << "(" + prefix((*f_iter)->get_name()) << " " << + typespec((*f_iter)->get_type()) << " " << + (*f_iter)->get_key() << ")"; + + + } + res << ")"; + return res.str(); +} + +string t_cl_generator::type_name(t_type* ttype) { + string prefix = ""; + t_program* program = ttype->get_program(); + + if (program != NULL && program != program_) + prefix = package_of(program) == package() ? "" : package_of(program) + ":"; + + string name = ttype->get_name(); + + if (ttype->is_struct() || ttype->is_xception()) + name = lowercase(ttype->get_name()); + + return prefix + name; +} + +THRIFT_REGISTER_GENERATOR( + cl, + "Common Lisp", + " no_asd: Do not define ASDF systems for each generated Thrift program.\n" + " sys_pref= The prefix to give ASDF system names. 
Default: thrift-gen-\n") diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cocoa_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cocoa_generator.cc index c2f09e8e6..0c0e1e0d4 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cocoa_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cocoa_generator.cc @@ -2733,8 +2733,8 @@ void t_cocoa_generator::print_const_value(ostream& out, indent(out); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; if (defval) out << type_name(type) << " "; out << name << " = [" << type_name(type, true) << " new];" @@ -2758,8 +2758,8 @@ void t_cocoa_generator::print_const_value(ostream& out, indent(mapout); t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; if (defval) mapout << type_name(type) << " "; mapout << name << " = @{"; @@ -2904,8 +2904,8 @@ string t_cocoa_generator::render_const_value(string name, } else if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; if (val.size() > 0) render << "[[" << type_name(type, true) << " alloc] initWith"; else @@ -2937,8 +2937,8 @@ string t_cocoa_generator::render_const_value(string name, render << "[[NSDictionary alloc] initWithObjectsAndKeys: "; t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + 
const map& val = value->get_map(); + map::const_iterator v_iter; bool first = true; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(name, ktype, v_iter->first, true); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cpp_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cpp_generator.cc index 1c9395715..c78b806ed 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cpp_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_cpp_generator.cc @@ -24,7 +24,9 @@ #include #include +#include #include +#include #include #include #include @@ -703,8 +705,8 @@ void t_cpp_generator::print_const_value(ofstream& out, } else if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; bool is_nonrequired_field = false; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; @@ -728,8 +730,8 @@ void t_cpp_generator::print_const_value(ofstream& out, } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); string val = render_const_value(out, name, vtype, v_iter->second); @@ -788,9 +790,9 @@ string t_cpp_generator::render_const_value(ofstream& out, break; case t_base_type::TYPE_DOUBLE: if (value->get_type() == t_const_value::CV_INTEGER) { - render << value->get_integer(); + render << "static_cast(" << value->get_integer() << ")"; } else { - render << 
value->get_double(); + render << emit_double_as_string(value->get_double()); } break; default: @@ -1796,7 +1798,7 @@ void t_cpp_generator::generate_service(t_service* tservice) { if (!gen_no_skeleton_) { generate_service_async_skeleton(tservice); } - + } f_header_ << "#ifdef _MSC_VER\n" diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_csharp_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_csharp_generator.cc index a6cb09d1f..b108c4564 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_csharp_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_csharp_generator.cc @@ -511,8 +511,8 @@ void t_csharp_generator::print_const_def_value(std::ofstream& out, if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; prepare_member_name_mapping((t_struct*)type); for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_field* field = NULL; @@ -532,8 +532,8 @@ void t_csharp_generator::print_const_def_value(std::ofstream& out, } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); string val = render_const_value(out, name, vtype, v_iter->second); @@ -988,7 +988,7 @@ void t_csharp_generator::generate_csharp_struct_reader(ofstream& out, t_struct* indent_up(); out << indent() << "throw new TProtocolException(TProtocolException.INVALID_DATA, " - << "\"required field " << prop_name((*f_iter)) << " not set\");" + << 
"\"required field " << prop_name((*f_iter)) << " not set\");" << endl; indent_down(); } diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_d_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_d_generator.cc index 35f611d93..bbef639ae 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_d_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_d_generator.cc @@ -557,8 +557,8 @@ private: const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -576,8 +576,8 @@ private: } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(ktype, v_iter->first); string val = render_const_value(vtype, v_iter->second); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_dart_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_dart_generator.cc index f7bd1c28b..c2d07e92a 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_dart_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_dart_generator.cc @@ -594,8 +594,8 @@ void t_dart_generator::print_const_value(std::ofstream& out, } else if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = 
value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; out << type_name(type) << " " << name << " = new " << type_name(type) << "()"; indent_up(); for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { @@ -623,8 +623,8 @@ void t_dart_generator::print_const_value(std::ofstream& out, t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_delphi_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_delphi_generator.cc index 1894fe83b..4db1cf7da 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_delphi_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_delphi_generator.cc @@ -65,6 +65,7 @@ public: constprefix_ = false; events_ = false; xmldoc_ = false; + async_ = false; for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) { if( iter->first.compare("ansistr_binary") == 0) { ansistr_binary_ = true; @@ -76,6 +77,8 @@ public: events_ = true; } else if( iter->first.compare("xmldoc") == 0) { xmldoc_ = true; + } else if( iter->first.compare("async") == 0) { + async_ = true; } else { throw "unknown option delphi:" + iter->first; } @@ -236,6 +239,7 @@ public: void generate_function_helpers(t_function* tfunction); void generate_service_interface(t_service* tservice); + void generate_service_interface(t_service* tservice, bool for_async); void generate_service_helpers(t_service* tservice); void generate_service_client(t_service* tservice); void generate_service_server(t_service* tservice); @@ -323,6 +327,7 @@ public: 
std::string prefix = "", bool is_xception_class = false); std::string function_signature(t_function* tfunction, + bool for_async, std::string full_cls = "", bool is_xception = false); std::string argument_list(t_struct* tstruct); @@ -399,6 +404,7 @@ private: bool constprefix_; bool events_; bool xmldoc_; + bool async_; void indent_up_impl() { ++indent_impl_; }; void indent_down_impl() { --indent_impl_; }; std::string indent_impl() { @@ -721,15 +727,19 @@ void t_delphi_generator::init_generator() { has_enum = false; has_const = false; create_keywords(); + add_delphi_uses_list("Classes"); add_delphi_uses_list("SysUtils"); add_delphi_uses_list("Generics.Collections"); + if(async_) { + add_delphi_uses_list("System.Threading"); + } + add_delphi_uses_list("Thrift"); add_delphi_uses_list("Thrift.Utils"); add_delphi_uses_list("Thrift.Collections"); add_delphi_uses_list("Thrift.Protocol"); add_delphi_uses_list("Thrift.Transport"); - if (register_types_) { add_delphi_uses_list("Thrift.TypeRegistry"); } @@ -1195,8 +1205,8 @@ void t_delphi_generator::print_const_def_value(std::ostream& vars, if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -1215,8 +1225,8 @@ void t_delphi_generator::print_const_def_value(std::ostream& vars, } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(vars, out, name, ktype, v_iter->first); 
string val = render_const_value(vars, out, name, vtype, v_iter->second); @@ -1850,19 +1860,28 @@ void t_delphi_generator::generate_service(t_service* tservice) { } void t_delphi_generator::generate_service_interface(t_service* tservice) { + generate_service_interface(tservice,false); + if(async_) { + generate_service_interface(tservice,true); + } +} + + +void t_delphi_generator::generate_service_interface(t_service* tservice, bool for_async) { string extends = ""; string extends_iface = ""; + string iface_name = for_async ? "IAsync" : "Iface"; indent_up(); generate_delphi_doc(s_service, tservice); if (tservice->get_extends() != NULL) { extends = type_name(tservice->get_extends(), true, true); - extends_iface = extends + ".Iface"; + extends_iface = extends + "." + iface_name; generate_delphi_doc(s_service, tservice); - indent(s_service) << "Iface = interface(" << extends_iface << ")" << endl; + indent(s_service) << iface_name << " = interface(" << extends_iface << ")" << endl; } else { - indent(s_service) << "Iface = interface" << endl; + indent(s_service) << iface_name << " = interface" << endl; } indent_up(); @@ -1870,7 +1889,7 @@ void t_delphi_generator::generate_service_interface(t_service* tservice) { vector::iterator f_iter; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { generate_delphi_doc(s_service, *f_iter); - indent(s_service) << function_signature(*f_iter) << endl; + indent(s_service) << function_signature(*f_iter, for_async) << endl; } indent_down(); indent(s_service) << "end;" << endl << endl; @@ -1896,20 +1915,15 @@ void t_delphi_generator::generate_service_helpers(t_service* tservice) { void t_delphi_generator::generate_service_client(t_service* tservice) { indent_up(); string extends = ""; - string extends_client = ""; - if (tservice->get_extends() != NULL) { - extends = type_name(tservice->get_extends()); - extends_client = extends + ".Client, "; - } + string extends_client = "TInterfacedObject"; + string implements = async_ 
? "Iface, IAsync" : "Iface"; generate_delphi_doc(s_service, tservice); if (tservice->get_extends() != NULL) { extends = type_name(tservice->get_extends(), true, true); extends_client = extends + ".TClient"; - indent(s_service) << "TClient = class(" << extends_client << ", Iface)" << endl; - } else { - indent(s_service) << "TClient = class( TInterfacedObject, Iface)" << endl; } + indent(s_service) << "TClient = class( " << extends_client << ", " << implements << ")" << endl; indent(s_service) << "public" << endl; indent_up(); @@ -1960,12 +1974,24 @@ void t_delphi_generator::generate_service_client(t_service* tservice) { indent(s_service) << "protected" << endl; indent_up(); + indent(s_service) << "// Iface" << endl; for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { string funname = (*f_iter)->get_name(); generate_delphi_doc(s_service, *f_iter); - indent(s_service) << function_signature(*f_iter) << endl; + indent(s_service) << function_signature(*f_iter, false) << endl; } + + if( async_) { + indent(s_service) << endl; + indent(s_service) << "// IAsync" << endl; + for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { + string funname = (*f_iter)->get_name(); + generate_delphi_doc(s_service, *f_iter); + indent(s_service) << function_signature(*f_iter, true) << endl; + } + } + indent_down(); indent(s_service) << "public" << endl; @@ -1976,36 +2002,65 @@ void t_delphi_generator::generate_service_client(t_service* tservice) { for (f_iter = functions.begin(); f_iter != functions.end(); ++f_iter) { string funname = (*f_iter)->get_name(); - indent_impl(s_service_impl) << function_signature(*f_iter, full_cls) << endl; - indent_impl(s_service_impl) << "begin" << endl; - indent_up_impl(); - indent_impl(s_service_impl) << "send_" << funname << "("; - - t_struct* arg_struct = (*f_iter)->get_arglist(); - - const vector& fields = arg_struct->get_members(); vector::const_iterator fld_iter; - bool first = true; - for (fld_iter = fields.begin(); 
fld_iter != fields.end(); ++fld_iter) { - if (first) { - first = false; - } else { - s_service_impl << ", "; - } - s_service_impl << normalize_name((*fld_iter)->get_name()); - } - s_service_impl << ");" << endl; + t_struct* arg_struct = (*f_iter)->get_arglist(); + const vector& fields = arg_struct->get_members(); - if (!(*f_iter)->is_oneway()) { - s_service_impl << indent_impl(); - if (!(*f_iter)->get_returntype()->is_void()) { - s_service_impl << "Result := "; - } - s_service_impl << "recv_" << funname << "();" << endl; - } + // one for sync only, two for async+sync + int mode = async_ ? 1 : 0; + while( mode >= 0) { + bool for_async = (mode != 0); + mode--; - indent_down_impl(); - indent_impl(s_service_impl) << "end;" << endl << endl; + indent_impl(s_service_impl) << function_signature(*f_iter, for_async, full_cls) << endl; + indent_impl(s_service_impl) << "begin" << endl; + indent_up_impl(); + + t_type* ttype = (*f_iter)->get_returntype(); + if( for_async) { + if (is_void(ttype)) { + // Delphi forces us to specify a type with IFuture, so we use Integer=0 for void methods + indent_impl(s_service_impl) << "result := TTask.Future(function: Integer" << endl; + } else { + string rettype = type_name(ttype, false, true, false, true); + indent_impl(s_service_impl) << "result := TTask.Future<" << rettype << ">(function: " << rettype << endl; + } + indent_impl(s_service_impl) << "begin" << endl; + indent_up_impl(); + } + + indent_impl(s_service_impl) << "send_" << funname << "("; + + bool first = true; + for (fld_iter = fields.begin(); fld_iter != fields.end(); ++fld_iter) { + if (first) { + first = false; + } else { + s_service_impl << ", "; + } + s_service_impl << normalize_name((*fld_iter)->get_name()); + } + s_service_impl << ");" << endl; + + if (!(*f_iter)->is_oneway()) { + s_service_impl << indent_impl(); + if (!(*f_iter)->get_returntype()->is_void()) { + s_service_impl << "Result := "; + } + s_service_impl << "recv_" << funname << "();" << endl; + } + + if( 
for_async) { + if (is_void(ttype)) { + indent_impl(s_service_impl) << "Result := 0;" << endl; // no IFuture in Delphi + } + indent_down_impl(); + indent_impl(s_service_impl) << "end);" << endl; + } + + indent_down_impl(); + indent_impl(s_service_impl) << "end;" << endl << endl; + } t_function send_function(g_type_void, string("send_") + (*f_iter)->get_name(), @@ -2018,8 +2073,8 @@ void t_delphi_generator::generate_service_client(t_service* tservice) { string argsvar = tmp("_args"); string msgvar = tmp("_msg"); - indent(s_service) << function_signature(&send_function) << endl; - indent_impl(s_service_impl) << function_signature(&send_function, full_cls) << endl; + indent(s_service) << function_signature(&send_function, false) << endl; + indent_impl(s_service_impl) << function_signature(&send_function, false, full_cls) << endl; indent_impl(s_service_impl) << "var" << endl; indent_up_impl(); indent_impl(s_service_impl) << argsvar << " : " << args_intfnm << ";" << endl; @@ -2072,8 +2127,8 @@ void t_delphi_generator::generate_service_client(t_service* tservice) { string appexvar = tmp("_ax"); string retvar = tmp("_ret"); - indent(s_service) << function_signature(&recv_function) << endl; - indent_impl(s_service_impl) << function_signature(&recv_function, full_cls) << endl; + indent(s_service) << function_signature(&recv_function, false) << endl; + indent_impl(s_service_impl) << function_signature(&recv_function, false, full_cls) << endl; indent_impl(s_service_impl) << "var" << endl; indent_up_impl(); indent_impl(s_service_impl) << msgvar << " : Thrift.Protocol.TThriftMessage;" << endl; @@ -2131,7 +2186,7 @@ void t_delphi_generator::generate_service_client(t_service* tservice) { if (!(*f_iter)->get_returntype()->is_void()) { indent_impl(s_service_impl) - << "raise TApplicationExceptionMissingResult.Create('" + << "raise TApplicationExceptionMissingResult.Create('" << (*f_iter)->get_name() << " failed: unknown result');" << endl; } @@ -2254,8 +2309,8 @@ void 
t_delphi_generator::generate_service_server(t_service* tservice) { indent_impl(s_service_impl) << "TProtocolUtil.Skip(iprot, TType.Struct);" << endl; indent_impl(s_service_impl) << "iprot.ReadMessageEnd();" << endl; indent_impl(s_service_impl) << "x := " - "TApplicationExceptionUnknownMethod.Create(" - "'Invalid method name: ''' + msg.Name + '''');" << endl; + "TApplicationExceptionUnknownMethod.Create(" + "'Invalid method name: ''' + msg.Name + '''');" << endl; indent_impl(s_service_impl) << "Thrift.Protocol.Init( msg, msg.Name, TMessageType.Exception, msg.SeqID);" << endl; @@ -2452,7 +2507,7 @@ void t_delphi_generator::generate_process_function(t_service* tservice, t_functi indent_impl(s_service_impl) << "if events <> nil then events.UnhandledError(E);" << endl; } if (!tfunction->is_oneway()) { - indent_impl(s_service_impl) << "appx := TApplicationExceptionInternalError.Create(E.Message);" + indent_impl(s_service_impl) << "appx := TApplicationExceptionInternalError.Create(E.Message);" << endl; indent_impl(s_service_impl) << "try" << endl; indent_up_impl(); @@ -3126,6 +3181,7 @@ string t_delphi_generator::declare_field(t_field* tfield, } string t_delphi_generator::function_signature(t_function* tfunction, + bool for_async, std::string full_cls, bool is_xception) { t_type* ttype = tfunction->get_returntype(); @@ -3135,13 +3191,25 @@ string t_delphi_generator::function_signature(t_function* tfunction, } else { prefix = full_cls + "."; } - if (is_void(ttype)) { - return "procedure " + prefix + normalize_name(tfunction->get_name(), true, is_xception) + "(" - + argument_list(tfunction->get_arglist()) + ");"; + + if( for_async) { + if (is_void(ttype)) { + return "function " + prefix + normalize_name(tfunction->get_name(), true, is_xception) + "Async(" + + argument_list(tfunction->get_arglist()) + "): IFuture;"; // no IFuture in Delphi + } else { + return "function " + prefix + normalize_name(tfunction->get_name(), true, is_xception) + "Async(" + + 
argument_list(tfunction->get_arglist()) + "): IFuture<" + + type_name(ttype, false, true, is_xception, true) + ">;"; + } } else { - return "function " + prefix + normalize_name(tfunction->get_name(), true, is_xception) + "(" - + argument_list(tfunction->get_arglist()) + "): " - + type_name(ttype, false, true, is_xception, true) + ";"; + if (is_void(ttype)) { + return "procedure " + prefix + normalize_name(tfunction->get_name(), true, is_xception) + "(" + + argument_list(tfunction->get_arglist()) + ");"; + } else { + return "function " + prefix + normalize_name(tfunction->get_name(), true, is_xception) + "(" + + argument_list(tfunction->get_arglist()) + "): " + + type_name(ttype, false, true, is_xception, true) + ";"; + } } } @@ -3721,7 +3789,7 @@ void t_delphi_generator::generate_delphi_struct_writer_impl(ostream& out, if (is_required && null_allowed) { null_allowed = false; indent_impl(code_block) << "if (Self." << fieldname << " = nil)" << endl; - indent_impl(code_block) << "then raise TProtocolExceptionInvalidData.Create(" + indent_impl(code_block) << "then raise TProtocolExceptionInvalidData.Create(" << "'required field " << fieldname << " not set');" << endl; } @@ -3917,4 +3985,5 @@ THRIFT_REGISTER_GENERATOR( " and container instances by interface or TypeInfo()\n" " constprefix: Name TConstants classes after IDL to reduce ambiguities\n" " events: Enable and use processing events in the generated code.\n" - " xmldoc: Enable XMLDoc comments for Help Insight etc.\n") + " xmldoc: Enable XMLDoc comments for Help Insight etc.\n" + " async: Generate IAsync interface to use Parallel Programming Library (XE7+ only).\n") diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_erl_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_erl_generator.cc index 372c78bf7..768db139d 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_erl_generator.cc +++ 
b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_erl_generator.cc @@ -384,6 +384,36 @@ void t_erl_generator::close_generator() { f_consts_hrl_file_.close(); } +const std::string emit_double_as_string(const double value) { + std::stringstream double_output_stream; + // sets the maximum precision: http://en.cppreference.com/w/cpp/io/manip/setprecision + // sets the output format to fixed: http://en.cppreference.com/w/cpp/io/manip/fixed (not in scientific notation) + double_output_stream << std::setprecision(std::numeric_limits::digits10 + 1); + + #ifdef _MSC_VER + // strtod is broken in MSVC compilers older than 2015, so std::fixed fails to format a double literal. + // more details: https://blogs.msdn.microsoft.com/vcblog/2014/06/18/ + // c-runtime-crt-features-fixes-and-breaking-changes-in-visual-studio-14-ctp1/ + // and + // http://www.exploringbinary.com/visual-c-plus-plus-strtod-still-broken/ + #if _MSC_VER >= MSC_2015_VER + double_output_stream << std::fixed; + #else + // note that if this function is called from the erlang generator and the MSVC compiler is older than 2015, + // the double literal must be output in the scientific format. There can be some cases where the + // mantissa of the output does not have fractionals, which is illegal in Erlang. + // example => 10000000000000000.0 being output as 1e+16 + double_output_stream << std::scientific; + #endif + #else + double_output_stream << std::fixed; + #endif + + double_output_stream << value; + + return double_output_stream.str(); +} + void t_erl_generator::generate_type_metadata(std::string function_name, vector names) { vector::iterator s_iter; size_t num_structs = names.size(); @@ -430,7 +460,7 @@ void t_erl_generator::generate_const_function(t_const* tconst, ostringstream& ex exports << const_fun_name << "/1, " << const_fun_name << "/2"; // Emit const function definition. 
- map::const_iterator i, end = value->get_map().end(); + map::const_iterator i, end = value->get_map().end(); // The one-argument form throws an error if the key does not exist in the map. for (i = value->get_map().begin(); i != end;) { functions << const_fun_name << "(" << render_const_value(ktype, i->first) << ") -> " @@ -575,9 +605,9 @@ string t_erl_generator::render_const_value(t_type* type, t_const_value* value) { break; case t_base_type::TYPE_DOUBLE: if (value->get_type() == t_const_value::CV_INTEGER) { - out << value->get_integer(); + out << "float(" << value->get_integer() << ")"; } else { - out << value->get_double(); + out << emit_double_as_string(value->get_double()); } break; default: @@ -590,8 +620,8 @@ string t_erl_generator::render_const_value(t_type* type, t_const_value* value) { out << "#" << type_name(type) << "{"; const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; bool first = true; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { @@ -626,7 +656,7 @@ string t_erl_generator::render_const_value(t_type* type, t_const_value* value) { } else { out << "dict:from_list(["; } - map::const_iterator i, end = value->get_map().end(); + map::const_iterator i, end = value->get_map().end(); for (i = value->get_map().begin(); i != end;) { out << "{" << render_const_value(ktype, i->first) << "," << render_const_value(vtype, i->second) << "}"; @@ -717,7 +747,7 @@ string t_erl_generator::render_member_type(t_field* field) { return type_name(type) + "()"; } else if (type->is_map()) { if (maps_) { - return "#{}"; + return "map()"; } else if (otp16_) { return "dict()"; } else { @@ -810,6 +840,8 @@ void t_erl_generator::generate_erl_struct_member(ostream& out, t_field* tmember) if (has_default_value(tmember)) out << " = " << render_member_value(tmember); out << " :: " << 
render_member_type(tmember); + if (tmember->get_req() != t_field::T_REQUIRED) + out << " | 'undefined'"; } bool t_erl_generator::has_default_value(t_field* field) { diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.cc index 0c1f49daf..7549d5dc2 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.cc @@ -101,21 +101,22 @@ void t_generator::generate_docstring_comment(ostream& out, const string& line_prefix, const string& contents, const string& comment_end) { - if (comment_start != "") + if (!comment_start.empty()) indent(out) << comment_start; stringstream docs(contents, ios_base::in); while (!(docs.eof() || docs.fail())) { char line[1024]; docs.getline(line, 1024); - // Just prnt a newline when the line & prefix are empty. - if (strlen(line) == 0 && line_prefix == "" && !docs.eof()) { - out << std::endl; - } else if (strlen(line) > 0 || !docs.eof()) { // skip the empty last line + if (strlen(line) > 0) { indent(out) << line_prefix << line << std::endl; + } else if (line_prefix.empty()){ + out << std::endl; + } else if(!docs.eof()) { + indent(out) << line_prefix << std::endl; } } - if (comment_end != "") + if (!comment_end.empty()) indent(out) << comment_end; } diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.h b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.h index fc3f32321..cbbfcb9fa 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.h +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_generator.h @@ -19,10 +19,13 @@ #ifndef T_GENERATOR_H #define T_GENERATOR_H +#define MSC_2015_VER 1900 #include +#include #include #include +#include #include #include "thrift/common.h" #include 
"thrift/version.h" @@ -93,7 +96,7 @@ public: protected: /** - * Optional methods that may be imlemented by subclasses to take necessary + * Optional methods that may be implemented by subclasses to take necessary * steps at the beginning or end of code generation. */ @@ -268,6 +271,30 @@ protected: return out.str(); } + const std::string emit_double_as_string(const double value) { + std::stringstream double_output_stream; + // sets the maximum precision: http://en.cppreference.com/w/cpp/io/manip/setprecision + // sets the output format to fixed: http://en.cppreference.com/w/cpp/io/manip/fixed (not in scientific notation) + double_output_stream << std::setprecision(std::numeric_limits::digits10 + 1); + + #ifdef _MSC_VER + // strtod is broken in MSVC compilers older than 2015, so std::fixed fails to format a double literal. + // more details: https://blogs.msdn.microsoft.com/vcblog/2014/06/18/ + // c-runtime-crt-features-fixes-and-breaking-changes-in-visual-studio-14-ctp1/ + // and + // http://www.exploringbinary.com/visual-c-plus-plus-strtod-still-broken/ + #if _MSC_VER >= MSC_2015_VER + double_output_stream << std::fixed; + #endif + #else + double_output_stream << std::fixed; + #endif + + double_output_stream << value; + + return double_output_stream.str(); + } + public: /** * Get the true type behind a series of typedefs. 
diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_go_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_go_generator.cc index 6cce32bcc..f3c7fee31 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_go_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_go_generator.cc @@ -81,7 +81,6 @@ public: gen_package_prefix_ = ""; package_flag = ""; read_write_private_ = false; - legacy_context_ = false; ignore_initialisms_ = false; for( iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) { if( iter->first.compare("package_prefix") == 0) { @@ -92,8 +91,6 @@ public: package_flag = (iter->second); } else if( iter->first.compare("read_write_private") == 0) { read_write_private_ = true; - } else if( iter->first.compare("legacy_context") == 0) { - legacy_context_ = true; } else if( iter->first.compare("ignore_initialisms") == 0) { ignore_initialisms_ = true; } else { @@ -287,7 +284,6 @@ private: std::string gen_package_prefix_; std::string gen_thrift_import_; bool read_write_private_; - bool legacy_context_; bool ignore_initialisms_; /** @@ -753,14 +749,6 @@ void t_go_generator::init_generator() { f_consts_name_ = package_dir_ + "/" + program_name_ + "-consts.go"; f_consts_.open(f_consts_name_.c_str()); - vector services = program_->get_services(); - vector::iterator sv_iter; - - for (sv_iter = services.begin(); sv_iter != services.end(); ++sv_iter) { - string service_dir = package_dir_ + "/" + underscore((*sv_iter)->get_name()) + "-remote"; - MKDIR(service_dir.c_str()); - } - // Print header f_types_ << go_autogen_comment() << go_package() << render_includes(false); @@ -883,16 +871,10 @@ string t_go_generator::go_imports_begin(bool consts) { "\t\"database/sql/driver\"\n" "\t\"errors\"\n"; } - if (legacy_context_) { - extra += - "\t\"golang.org/x/net/context\"\n"; - } else { - extra += - "\t\"context\"\n"; - } return string( "import (\n" 
"\t\"bytes\"\n" + "\t\"context\"\n" "\t\"reflect\"\n" + extra + "\t\"fmt\"\n" @@ -1115,8 +1097,8 @@ string t_go_generator::render_const_value(t_type* type, t_const_value* value, co indent_up(); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; @@ -1141,10 +1123,10 @@ string t_go_generator::render_const_value(t_type* type, t_const_value* value, co } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); + const map& val = value->get_map(); out << "map[" << type_to_go_type(ktype) << "]" << type_to_go_type(vtype) << "{" << endl; indent_up(); - map::const_iterator v_iter; + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { out << indent() << render_const_value(ktype, v_iter->first, name) << ": " @@ -1878,16 +1860,16 @@ void t_go_generator::generate_service_client(t_service* tservice) { f_types_ << indent() << "type " << serviceName << "Client struct {" << endl; indent_up(); - f_types_ << indent() << "c thrift.TClient" << endl; if (!extends_client.empty()) { f_types_ << indent() << "*" << extends_client << endl; + } else { + f_types_ << indent() << "c thrift.TClient" << endl; } indent_down(); f_types_ << indent() << "}" << endl << endl; // Legacy constructor function - f_types_ << indent() << "// Deprecated: Use New" << serviceName << " instead" << endl; f_types_ << indent() << "func New" << serviceName << "ClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *" << serviceName << "Client {" << endl; @@ -1907,7 +1889,6 @@ void t_go_generator::generate_service_client(t_service* tservice) { indent_down(); f_types_ << indent() << "}" << endl << endl; // Legacy 
constructor function with custom input & output protocols - f_types_ << indent() << "// Deprecated: Use New" << serviceName << " instead" << endl; f_types_ << indent() << "func New" << serviceName << "ClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *" @@ -1936,9 +1917,10 @@ void t_go_generator::generate_service_client(t_service* tservice) { f_types_ << indent() << "return &" << serviceName << "Client{" << endl; indent_up(); - f_types_ << indent() << "c: c," << endl; if (!extends.empty()) { f_types_ << indent() << extends_field << ": " << extends_client_new << "(c)," << endl; + } else { + f_types_ << indent() << "c: c," << endl; } indent_down(); f_types_ << indent() << "}" << endl; @@ -1946,6 +1928,14 @@ void t_go_generator::generate_service_client(t_service* tservice) { indent_down(); f_types_ << indent() << "}" << endl << endl; + if (extends.empty()) { + f_types_ << indent() << "func (p *" << serviceName << "Client) Client_() thrift.TClient {" << endl; + indent_up(); + f_types_ << indent() << "return p.c" << endl; + indent_down(); + f_types_ << indent() << "}" << endl; + } + // Generate client method implementations vector functions = tservice->get_functions(); vector::const_iterator f_iter; @@ -1975,7 +1965,7 @@ void t_go_generator::generate_service_client(t_service* tservice) { std::string resultName = tmp("_result"); std::string resultType = publicize(method + "_result", true); f_types_ << indent() << "var " << resultName << " " << resultType << endl; - f_types_ << indent() << "if err = p.c.Call(ctx, \"" + f_types_ << indent() << "if err = p.Client_().Call(ctx, \"" << method << "\", &" << argsName << ", &" << resultName << "); err != nil {" << endl; indent_up(); @@ -2016,7 +2006,7 @@ void t_go_generator::generate_service_client(t_service* tservice) { } } else { // TODO: would be nice to not to duplicate the call generation - f_types_ << indent() << "if err := p.c.Call(ctx, \"" + f_types_ << indent() << "if err := 
p.Client_().Call(ctx, \"" << method << "\", &"<< argsName << ", nil); err != nil {" << endl; indent_up(); @@ -2047,8 +2037,16 @@ void t_go_generator::generate_service_remote(t_service* tservice) { parent = parent->get_extends(); } + // This file is not useful if there are no functions; don't generate it + if (functions.size() == 0) { + return; + } + + string f_remote_dir = package_dir_ + "/" + underscore(service_name_) + "-remote"; + MKDIR(f_remote_dir.c_str()); + vector::iterator f_iter; - string f_remote_name = package_dir_ + "/" + underscore(service_name_) + "-remote/" + string f_remote_name = f_remote_dir + "/" + underscore(service_name_) + "-remote.go"; ofstream f_remote; f_remote.open(f_remote_name.c_str()); @@ -2065,9 +2063,6 @@ void t_go_generator::generate_service_remote(t_service* tservice) { string unused_protection; string ctxPackage = "context"; - if (legacy_context_) { - ctxPackage = "golang.org/x/net/context"; - } f_remote << go_autogen_comment(); f_remote << indent() << "package main" << endl << endl; @@ -2121,6 +2116,24 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << " os.Exit(0)" << endl; f_remote << indent() << "}" << endl; f_remote << indent() << endl; + + f_remote << indent() << "type httpHeaders map[string]string" << endl; + f_remote << indent() << endl; + f_remote << indent() << "func (h httpHeaders) String() string {" << endl; + f_remote << indent() << " var m map[string]string = h" << endl; + f_remote << indent() << " return fmt.Sprintf(\"%s\", m)" << endl; + f_remote << indent() << "}" << endl; + f_remote << indent() << endl; + f_remote << indent() << "func (h httpHeaders) Set(value string) error {" << endl; + f_remote << indent() << " parts := strings.Split(value, \": \")" << endl; + f_remote << indent() << " if len(parts) != 2 {" << endl; + f_remote << indent() << " return fmt.Errorf(\"header should be of format 'Key: Value'\")" << endl; + f_remote << indent() << " }" << endl; + f_remote << 
indent() << " h[parts[0]] = parts[1]" << endl; + f_remote << indent() << " return nil" << endl; + f_remote << indent() << "}" << endl; + f_remote << indent() << endl; + f_remote << indent() << "func main() {" << endl; indent_up(); f_remote << indent() << "flag.Usage = Usage" << endl; @@ -2130,6 +2143,7 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << "var urlString string" << endl; f_remote << indent() << "var framed bool" << endl; f_remote << indent() << "var useHttp bool" << endl; + f_remote << indent() << "headers := make(httpHeaders)" << endl; f_remote << indent() << "var parsedUrl *url.URL" << endl; f_remote << indent() << "var trans thrift.TTransport" << endl; f_remote << indent() << "_ = strconv.Atoi" << endl; @@ -2144,6 +2158,7 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << "flag.BoolVar(&framed, \"framed\", false, \"Use framed transport\")" << endl; f_remote << indent() << "flag.BoolVar(&useHttp, \"http\", false, \"Use http\")" << endl; + f_remote << indent() << "flag.Var(headers, \"H\", \"Headers to set on the http(s) request (e.g. 
-H \\\"Key: Value\\\")\")" << endl; f_remote << indent() << "flag.Parse()" << endl; f_remote << indent() << endl; f_remote << indent() << "if len(urlString) > 0 {" << endl; @@ -2154,7 +2169,7 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << " flag.Usage()" << endl; f_remote << indent() << " }" << endl; f_remote << indent() << " host = parsedUrl.Host" << endl; - f_remote << indent() << " useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == \"http\"" + f_remote << indent() << " useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == \"http\" || parsedUrl.Scheme == \"https\"" << endl; f_remote << indent() << "} else if useHttp {" << endl; f_remote << indent() << " _, err := url.Parse(fmt.Sprint(\"http://\", host, \":\", port))" @@ -2169,6 +2184,12 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << "var err error" << endl; f_remote << indent() << "if useHttp {" << endl; f_remote << indent() << " trans, err = thrift.NewTHttpClient(parsedUrl.String())" << endl; + f_remote << indent() << " if len(headers) > 0 {" << endl; + f_remote << indent() << " httptrans := trans.(*thrift.THttpClient)" << endl; + f_remote << indent() << " for key, value := range headers {" << endl; + f_remote << indent() << " httptrans.SetHeader(key, value)" << endl; + f_remote << indent() << " }" << endl; + f_remote << indent() << " }" << endl; f_remote << indent() << "} else {" << endl; f_remote << indent() << " portStr := fmt.Sprint(port)" << endl; f_remote << indent() << " if strings.Contains(host, \":\") {" << endl; @@ -2355,7 +2376,7 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << " Usage()" << endl; f_remote << indent() << " return" << endl; f_remote << indent() << "}" << endl; - f_remote << indent() << factory << " := thrift.NewTSimpleJSONProtocolFactory()" << endl; + f_remote << indent() << factory << " := thrift.NewTJSONProtocolFactory()" << 
endl; f_remote << indent() << jsProt << " := " << factory << ".GetProtocol(" << mbTrans << ")" << endl; f_remote << indent() << "argvalue" << i << " := " << tstruct_module << ".New" << tstruct_name @@ -2383,7 +2404,7 @@ void t_go_generator::generate_service_remote(t_service* tservice) { f_remote << indent() << " Usage()" << endl; f_remote << indent() << " return" << endl; f_remote << indent() << "}" << endl; - f_remote << indent() << factory << " := thrift.NewTSimpleJSONProtocolFactory()" << endl; + f_remote << indent() << factory << " := thrift.NewTJSONProtocolFactory()" << endl; f_remote << indent() << jsProt << " := " << factory << ".GetProtocol(" << mbTrans << ")" << endl; f_remote << indent() << "containerStruct" << i << " := " << package_name_ << ".New" @@ -2568,7 +2589,7 @@ void t_go_generator::generate_service_server(t_service* tservice) { f_types_ << indent() << " oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)" << endl; f_types_ << indent() << " " << x << ".Write(oprot)" << endl; f_types_ << indent() << " oprot.WriteMessageEnd()" << endl; - f_types_ << indent() << " oprot.Flush()" << endl; + f_types_ << indent() << " oprot.Flush(ctx)" << endl; f_types_ << indent() << " return false, " << x << endl; f_types_ << indent() << "" << endl; f_types_ << indent() << "}" << endl << endl; @@ -2633,7 +2654,7 @@ void t_go_generator::generate_process_function(t_service* tservice, t_function* << "\", thrift.EXCEPTION, seqId)" << endl; f_types_ << indent() << " x.Write(oprot)" << endl; f_types_ << indent() << " oprot.WriteMessageEnd()" << endl; - f_types_ << indent() << " oprot.Flush()" << endl; + f_types_ << indent() << " oprot.Flush(ctx)" << endl; } f_types_ << indent() << " return false, err" << endl; f_types_ << indent() << "}" << endl << endl; @@ -2701,7 +2722,7 @@ void t_go_generator::generate_process_function(t_service* tservice, t_function* << "\", thrift.EXCEPTION, seqId)" << endl; f_types_ << indent() << " x.Write(oprot)" << endl; f_types_ << indent() << 
" oprot.WriteMessageEnd()" << endl; - f_types_ << indent() << " oprot.Flush()" << endl; + f_types_ << indent() << " oprot.Flush(ctx)" << endl; } f_types_ << indent() << " return true, err2" << endl; @@ -2738,7 +2759,7 @@ void t_go_generator::generate_process_function(t_service* tservice, t_function* << endl; f_types_ << indent() << " err = err2" << endl; f_types_ << indent() << "}" << endl; - f_types_ << indent() << "if err2 = oprot.Flush(); err == nil && err2 != nil {" << endl; + f_types_ << indent() << "if err2 = oprot.Flush(ctx); err == nil && err2 != nil {" << endl; f_types_ << indent() << " err = err2" << endl; f_types_ << indent() << "}" << endl; f_types_ << indent() << "if err != nil {" << endl; @@ -3634,6 +3655,4 @@ THRIFT_REGISTER_GENERATOR(go, "Go", " ignore_initialisms\n" " Disable automatic spelling correction of initialisms (e.g. \"URL\")\n" \ " read_write_private\n" - " Make read/write methods private, default is public Read/Write\n" \ - " legacy_context\n" - " Use legacy x/net/context instead of context in go<1.7.\n") + " Make read/write methods private, default is public Read/Write\n") diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_gv_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_gv_generator.cc index 14b537712..c2f8b5a9c 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_gv_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_gv_generator.cc @@ -249,8 +249,8 @@ void t_gv_generator::print_const_value(t_type* type, t_const_value* tvalue) { break; case t_const_value::CV_MAP: { f_out_ << "\\{ "; - map map_elems = tvalue->get_map(); - map::iterator map_iter; + map map_elems = tvalue->get_map(); + map::iterator map_iter; for (map_iter = map_elems.begin(); map_iter != map_elems.end(); map_iter++) { if (!first) { f_out_ << ", "; diff --git 
a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_haxe_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_haxe_generator.cc index 97c7d19a3..ce3816d3a 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_haxe_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_haxe_generator.cc @@ -503,8 +503,8 @@ void t_haxe_generator::print_const_value(std::ofstream& out, } else if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; out << name << ":" << type_name(type) << " = new " << type_name(type, false, true) << "();" << endl; if (!in_static) { @@ -548,8 +548,8 @@ void t_haxe_generator::print_const_value(std::ofstream& out, } t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); string val = render_const_value(out, name, vtype, v_iter->second); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_hs_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_hs_generator.cc index d0a8cb2d6..6c8cb7fc9 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_hs_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_hs_generator.cc @@ -424,10 +424,10 @@ string t_hs_generator::render_const_value(t_type* type, t_const_value* value) { out << "default_" << cname << "{"; const vector& fields = ((t_struct*)type)->get_members(); - const map& val = 
value->get_map(); + const map& val = value->get_map(); bool first = true; - for (map::const_iterator v_iter = val.begin(); + for (map::const_iterator v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_field* field = NULL; @@ -458,8 +458,8 @@ string t_hs_generator::render_const_value(t_type* type, t_const_value* value) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; out << "(Map.fromList ["; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_html_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_html_generator.cc index dfd5df311..5b063707f 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_html_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_html_generator.cc @@ -777,8 +777,8 @@ void t_html_generator::print_const_value(t_type* type, t_const_value* tvalue) { f_out_ << "{ "; const vector& fields = ((t_struct*)truetype)->get_members(); vector::const_iterator f_iter; - const map& val = tvalue->get_map(); - map::const_iterator v_iter; + const map& val = tvalue->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -800,8 +800,8 @@ void t_html_generator::print_const_value(t_type* type, t_const_value* tvalue) { f_out_ << " }"; } else if (truetype->is_map()) { f_out_ << "{ "; - map map_elems = tvalue->get_map(); - map::iterator map_iter; + map map_elems = tvalue->get_map(); + map::iterator map_iter; for (map_iter = map_elems.begin(); map_iter != map_elems.end(); map_iter++) { if (!first) { f_out_ << ", "; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_java_generator.cc 
b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_java_generator.cc index ebc83505a..3c7b75309 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_java_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_java_generator.cc @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -596,6 +597,7 @@ void t_java_generator::generate_consts(std::vector consts) { f_consts.close(); } + /** * Prints the value of a constant with the given type. Note that type checking * is NOT performed in this function as it is always run beforehand using the @@ -619,10 +621,12 @@ void t_java_generator::print_const_value(std::ofstream& out, } else if (type->is_enum()) { out << name << " = " << render_const_value(out, type, value) << ";" << endl << endl; } else if (type->is_struct() || type->is_xception()) { - const vector& fields = ((t_struct*)type)->get_members(); + const vector& unsorted_fields = ((t_struct*)type)->get_members(); + vector fields = unsorted_fields; + std::sort(fields.begin(), fields.end(), t_field::key_compare()); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; out << name << " = new " << type_name(type, false, true) << "();" << endl; if (!in_static) { indent(out) << "static {" << endl; @@ -660,8 +664,8 @@ void t_java_generator::print_const_value(std::ofstream& out, } t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, ktype, v_iter->first); string val = render_const_value(out, vtype, v_iter->second); @@ -731,9 +735,9 @@ string t_java_generator::render_const_value(ofstream& out, t_type* 
type, t_const break; case t_base_type::TYPE_DOUBLE: if (value->get_type() == t_const_value::CV_INTEGER) { - render << "(double)" << value->get_integer(); + render << value->get_integer() << "d"; } else { - render << value->get_double(); + render << emit_double_as_string(value->get_double()); } break; default: diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_javame_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_javame_generator.cc index 24b756004..0f4181dc3 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_javame_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_javame_generator.cc @@ -454,8 +454,8 @@ void t_javame_generator::print_const_value(std::ofstream& out, } else if (type->is_struct() || type->is_xception()) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; out << name << " = new " << type_name(type, false, true) << "();" << endl; if (!in_static) { indent(out) << "static {" << endl; @@ -489,8 +489,8 @@ void t_javame_generator::print_const_value(std::ofstream& out, } t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); string val = render_const_value(out, name, vtype, v_iter->second); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_js_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_js_generator.cc index f45ef5736..7d160b911 100644 --- 
a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_js_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_js_generator.cc @@ -20,7 +20,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -58,6 +60,7 @@ public: gen_node_ = false; gen_jquery_ = false; gen_ts_ = false; + gen_es6_ = false; bool with_ns_ = false; @@ -70,6 +73,8 @@ public: gen_ts_ = true; } else if( iter->first.compare("with_ns") == 0) { with_ns_ = true; + } else if( iter->first.compare("es6") == 0) { + gen_es6_ = true; } else { throw "unknown option js:" + iter->first; } @@ -79,6 +84,10 @@ public: throw "Invalid switch: [-gen js:node,ts] options not compatible"; } + if (gen_es6_ && gen_jquery_) { + throw "Invalid switch: [-gen js:es6,jquery] options not compatible"; + } + if (gen_node_ && gen_jquery_) { throw "Invalid switch: [-gen js:node,jquery] options not compatible, try: [-gen js:node -gen " "js:jquery]"; @@ -327,6 +336,11 @@ private: */ bool gen_ts_; + /** + * True if we should generate ES6 code, i.e. with Promises + */ + bool gen_es6_; + /** * The name of the defined module(s), for TypeScript Definition Files. 
*/ @@ -370,7 +384,7 @@ void t_js_generator::init_generator() { // Print header f_types_ << autogen_comment(); - if (gen_node_ && no_ns_) { + if ((gen_node_ || gen_es6_) && no_ns_) { f_types_ << "\"use strict\";" << endl << endl; } @@ -408,10 +422,13 @@ void t_js_generator::init_generator() { */ string t_js_generator::js_includes() { if (gen_node_) { - return string( + string result = string( "var thrift = require('thrift');\n" - "var Thrift = thrift.Thrift;\n" - "var Q = thrift.Q;\n"); + "var Thrift = thrift.Thrift;\n"); + if (!gen_es6_) { + result += "var Q = thrift.Q;\n"; + } + return result; } return ""; @@ -549,7 +566,7 @@ string t_js_generator::render_const_value(t_type* type, t_const_value* value) { if (value->get_type() == t_const_value::CV_INTEGER) { out << value->get_integer(); } else { - out << value->get_double(); + out << emit_double_as_string(value->get_double()); } break; default: @@ -562,8 +579,8 @@ string t_js_generator::render_const_value(t_type* type, t_const_value* value) { indent_up(); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -589,8 +606,8 @@ string t_js_generator::render_const_value(t_type* type, t_const_value* value) { out << "{" << endl; indent_up(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { if (v_iter != val.begin()) out << "," << endl; @@ -965,7 +982,7 @@ void t_js_generator::generate_service(t_service* tservice) { f_service_ << autogen_comment(); - if (gen_node_ && no_ns_) { + if ((gen_node_ || gen_es6_) && no_ns_) { f_service_ << "\"use strict\";" << endl << 
endl; } @@ -1373,19 +1390,42 @@ void t_js_generator::generate_service_client(t_service* tservice) { // Open function f_service_ << js_namespace(tservice->get_program()) << service_name_ << "Client.prototype." - << function_signature(*f_iter, "", true) << " {" << endl; + << function_signature(*f_iter, "", !gen_es6_) << " {" << endl; indent_up(); if (gen_ts_) { - f_service_ts_ << ts_print_doc(*f_iter) << - // function definition without callback - ts_indent() << ts_function_signature(*f_iter, false) << endl << ts_print_doc(*f_iter) << - // overload with callback - ts_indent() << ts_function_signature(*f_iter, true) << endl; + // function definition without callback + f_service_ts_ << ts_print_doc(*f_iter) << ts_indent() << ts_function_signature(*f_iter, false) << endl; + + if (!gen_es6_) { + // overload with callback + f_service_ts_ << ts_print_doc(*f_iter) << ts_indent() << ts_function_signature(*f_iter, true) << endl; + } } - if (gen_node_) { // Node.js output ./gen-nodejs + if (gen_es6_ && gen_node_) { + f_service_ << indent() << "this._seqid = this.new_seqid();" << endl; + f_service_ << indent() << "var self = this;" << endl << indent() + << "return new Promise(function(resolve, reject) {" << endl; + indent_up(); + f_service_ << indent() << "self._reqs[self.seqid()] = function(error, result) {" << endl; + indent_up(); + indent(f_service_) << "if (error) {" << endl; + indent_up(); + indent(f_service_) << "reject(error);" << endl; + indent_down(); + indent(f_service_) << "} else {" << endl; + indent_up(); + indent(f_service_) << "resolve(result);" << endl; + indent_down(); + indent(f_service_) << "}" << endl; + indent_down(); + indent(f_service_) << "};" << endl; + f_service_ << indent() << "self.send_" << funname << "(" << arglist << ");" << endl; + indent_down(); + f_service_ << indent() << "});" << endl; + } else if (gen_node_) { // Node.js output ./gen-nodejs f_service_ << indent() << "this._seqid = this.new_seqid();" << endl << indent() << "if (callback === 
undefined) {" << endl; indent_up(); @@ -1412,6 +1452,23 @@ void t_js_generator::generate_service_client(t_service* tservice) { << "this.send_" << funname << "(" << arglist << ");" << endl; indent_down(); indent(f_service_) << "}" << endl; + } else if (gen_es6_) { + f_service_ << indent() << "var self = this;" << endl << indent() + << "return new Promise(function(resolve, reject) {" << endl; + indent_up(); + f_service_ << indent() << "self.send_" << funname << "(" << arglist + << (arglist.empty() ? "" : ", ") << "function(error, result) {" << endl; + indent_up(); + f_service_ << indent() << "if (error) {" << endl; + f_service_ << indent() << " reject(error);" << endl; + f_service_ << indent() << "} else {" << endl; + f_service_ << indent() << " resolve(result);" << endl; + f_service_ << indent() << "}" << endl; + indent_down(); + f_service_ << indent() << "});" << endl; + indent_down(); + f_service_ << indent() << "});" << endl; + } else if (gen_jquery_) { // jQuery output ./gen-js f_service_ << indent() << "if (callback === undefined) {" << endl; indent_up(); @@ -1507,10 +1564,29 @@ void t_js_generator::generate_service_client(t_service* tservice) { } else { if (gen_jquery_) { f_service_ << indent() << "return this.output.getTransport().flush(callback);" << endl; + } else if (gen_es6_) { + f_service_ << indent() << "var self = this;" << endl; + if((*f_iter)->is_oneway()) { + f_service_ << indent() << "this.output.getTransport().flush(true, null);" << endl; + f_service_ << indent() << "callback();" << endl; + } else { + f_service_ << indent() << "this.output.getTransport().flush(true, function() {" << endl; + indent_up(); + f_service_ << indent() << "var error = null, result = null;" << endl; + f_service_ << indent() << "try {" << endl; + f_service_ << indent() << " result = self.recv_" << funname << "();" << endl; + f_service_ << indent() << "} catch (e) {" << endl; + f_service_ << indent() << " error = e;" << endl; + f_service_ << indent() << "}" << endl; + 
f_service_ << indent() << "callback(error, result);" << endl; + indent_down(); + f_service_ << indent() << "});"; + } } else { f_service_ << indent() << "if (callback) {" << endl; if((*f_iter)->is_oneway()) { f_service_ << indent() << " this.output.getTransport().flush(true, null);" << endl; + f_service_ << indent() << " callback();" << endl; } else { f_service_ << indent() << " var self = this;" << endl; f_service_ << indent() << " this.output.getTransport().flush(true, function() {" << endl; @@ -2238,7 +2314,12 @@ std::string t_js_generator::ts_function_signature(t_function* tfunction, bool in str += "void;"; } } else { - str += "): " + ts_get_type(tfunction->get_returntype()) + ";"; + if (gen_es6_) { + str += "): Promise<" + ts_get_type(tfunction->get_returntype()) + ">;"; + } + else { + str += "): " + ts_get_type(tfunction->get_returntype()) + ";"; + } } return str; @@ -2282,4 +2363,5 @@ THRIFT_REGISTER_GENERATOR(js, " jquery: Generate jQuery compatible code.\n" " node: Generate node.js compatible code.\n" " ts: Generate TypeScript definition files.\n" - " with_ns: Create global namespace objects when using node.js\n") + " with_ns: Create global namespace objects when using node.js\n" + " es6: Create ES6 code with Promises\n") diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_json_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_json_generator.cc index 153ec35d2..cf5f80185 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_json_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_json_generator.cc @@ -510,8 +510,8 @@ void t_json_generator::write_const_value(t_const_value* value, bool should_force case t_const_value::CV_MAP: { start_object(NO_INDENT); - std::map map = value->get_map(); - std::map::iterator mit; + std::map map = value->get_map(); + std::map::iterator mit; for (mit = map.begin(); mit != map.end(); ++mit) { 
write_comma_if_needed(); f_json_ << indent(); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_lua_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_lua_generator.cc index 92e6749de..b4a6793eb 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_lua_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_lua_generator.cc @@ -276,8 +276,8 @@ string t_lua_generator::render_const_value(t_type* type, t_const_value* value) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end();) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -308,8 +308,8 @@ string t_lua_generator::render_const_value(t_type* type, t_const_value* value) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end();) { indent(out) << "[" << render_const_value(ktype, v_iter->first) << "] = " << render_const_value(vtype, v_iter->second); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.cc index 8d157a9e1..dbf2fd0ce 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.cc @@ -35,6 +35,7 @@ #include "thrift/platform.h" #include "thrift/generate/t_oop_generator.h" +#include "thrift/generate/t_netcore_generator.h" using std::map; using 
std::ofstream; @@ -43,211 +44,124 @@ using std::string; using std::stringstream; using std::vector; -//TODO: check for indentation +//TODO: check for indentation //TODO: Do we need seqId_ in generation? -static const string endl = "\n"; // avoid ostream << std::endl flushes - -struct member_mapping_scope +t_netcore_generator::t_netcore_generator(t_program* program, const map& parsed_options, const string& option_string) + : t_oop_generator(program) { - void* scope_member; - map mapping_table; -}; + (void)option_string; -class t_netcore_generator : public t_oop_generator + nullable_ = false; + hashcode_ = false; + union_ = false; + serialize_ = false; + wcf_ = false; + wcf_namespace_.clear(); + + map::const_iterator iter; + + for (iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) + { + if (iter->first.compare("nullable") == 0) + { + nullable_ = true; + } + else if (iter->first.compare("hashcode") == 0) + { + hashcode_ = true; + } + else if (iter->first.compare("union") == 0) + { + union_ = true; + } + else if (iter->first.compare("serial") == 0) + { + serialize_ = true; + wcf_namespace_ = iter->second; // since there can be only one namespace + } + else if (iter->first.compare("wcf") == 0) + { + wcf_ = true; + wcf_namespace_ = iter->second; + } + else + { + throw "unknown option netcore:" + iter->first; + } + } + + out_dir_base_ = "gen-netcore"; +} + +static string correct_function_name_for_async(string const& function_name) { -public: - t_netcore_generator(t_program* program, const map& parsed_options, const string& option_string) - : t_oop_generator(program) + string const async_end = "Async"; + size_t i = function_name.find(async_end); + if (i != string::npos) { - (void)option_string; - - nullable_ = false; - hashcode_ = false; - union_ = false; - serialize_ = false; - wcf_ = false; - wcf_namespace_.clear(); - - map::const_iterator iter; - - for (iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) - { - if 
(iter->first.compare("nullable") == 0) - { - nullable_ = true; - } - else if (iter->first.compare("hashcode") == 0) - { - hashcode_ = true; - } - else if (iter->first.compare("union") == 0) - { - union_ = true; - } - else if (iter->first.compare("serial") == 0) - { - serialize_ = true; - wcf_namespace_ = iter->second; // since there can be only one namespace - } - else if (iter->first.compare("wcf") == 0) - { - wcf_ = true; - wcf_namespace_ = iter->second; - } - else - { - throw "unknown option netcore:" + iter->first; - } - } - - out_dir_base_ = "gen-netcore"; + return function_name + async_end; } - // overrides - void init_generator(); - void close_generator(); - void generate_consts(vector consts); - void generate_typedef(t_typedef* ttypedef); - void generate_enum(t_enum* tenum); - void generate_struct(t_struct* tstruct); - void generate_xception(t_struct* txception); - void generate_service(t_service* tservice); + return function_name; +} - void generate_property(ofstream& out, t_field* tfield, bool isPublic, bool generateIsset); - void generate_netcore_property(ofstream& out, t_field* tfield, bool isPublic, bool includeIsset = true, string fieldPrefix = ""); - bool print_const_value(ofstream& out, string name, t_type* type, t_const_value* value, bool in_static, bool defval = false, bool needtype = false); - string render_const_value(ofstream& out, string name, t_type* type, t_const_value* value); - void print_const_constructor(ofstream& out, vector consts); - void print_const_def_value(ofstream& out, string name, t_type* type, t_const_value* value); - void generate_netcore_struct(t_struct* tstruct, bool is_exception); - void generate_netcore_union(t_struct* tunion); - void generate_netcore_struct_definition(ofstream& out, t_struct* tstruct, bool is_xception = false, bool in_class = false, bool is_result = false); - void generate_netcore_union_definition(ofstream& out, t_struct* tunion); - void generate_netcore_union_class(ofstream& out, t_struct* tunion, 
t_field* tfield); - void generate_netcore_wcffault(ofstream& out, t_struct* tstruct); - void generate_netcore_struct_reader(ofstream& out, t_struct* tstruct); - void generate_netcore_struct_result_writer(ofstream& out, t_struct* tstruct); - void generate_netcore_struct_writer(ofstream& out, t_struct* tstruct); - void generate_netcore_struct_tostring(ofstream& out, t_struct* tstruct); - void generate_netcore_struct_equals(ofstream& out, t_struct* tstruct); - void generate_netcore_struct_hashcode(ofstream& out, t_struct* tstruct); - void generate_netcore_union_reader(ofstream& out, t_struct* tunion); - void generate_function_helpers(ofstream& out, t_function* tfunction); - void generate_service_interface(ofstream& out, t_service* tservice); - void generate_service_helpers(ofstream& out, t_service* tservice); - void generate_service_client(ofstream& out, t_service* tservice); - void generate_service_server(ofstream& out, t_service* tservice); - void generate_process_function_async(ofstream& out, t_service* tservice, t_function* function); - void generate_deserialize_field(ofstream& out, t_field* tfield, string prefix = "", bool is_propertyless = false); - void generate_deserialize_struct(ofstream& out, t_struct* tstruct, string prefix = ""); - void generate_deserialize_container(ofstream& out, t_type* ttype, string prefix = ""); - void generate_deserialize_set_element(ofstream& out, t_set* tset, string prefix = ""); - void generate_deserialize_map_element(ofstream& out, t_map* tmap, string prefix = ""); - void generate_deserialize_list_element(ofstream& out, t_list* list, string prefix = ""); - void generate_serialize_field(ofstream& out, t_field* tfield, string prefix = "", bool is_element = false, bool is_propertyless = false); - void generate_serialize_struct(ofstream& out, t_struct* tstruct, string prefix = ""); - void generate_serialize_container(ofstream& out, t_type* ttype, string prefix = ""); - void generate_serialize_map_element(ofstream& out, t_map* tmap, 
string iter, string map); - void generate_serialize_set_element(ofstream& out, t_set* tmap, string iter); - void generate_serialize_list_element(ofstream& out, t_list* tlist, string iter); - void generate_netcore_doc(ofstream& out, t_field* field); - void generate_netcore_doc(ofstream& out, t_doc* tdoc); - void generate_netcore_doc(ofstream& out, t_function* tdoc); - void generate_netcore_docstring_comment(ofstream& out, string contents); - void docstring_comment(ofstream& out, const string& comment_start, const string& line_prefix, const string& contents, const string& comment_end); - void start_netcore_namespace(ofstream& out); - void end_netcore_namespace(ofstream& out); - - string netcore_type_usings() const; - string netcore_thrift_usings() const; - string type_name(t_type* ttype, bool in_countainer = false, bool in_init = false, bool in_param = false, bool is_required = false); - string base_type_name(t_base_type* tbase, bool in_container = false, bool in_param = false, bool is_required = false); - string declare_field(t_field* tfield, bool init = false, string prefix = ""); - string function_signature_async(t_function* tfunction, string prefix = ""); - string function_signature(t_function* tfunction, string prefix = ""); - string argument_list(t_struct* tstruct); - string type_to_enum(t_type* ttype); - string prop_name(t_field* tfield, bool suppress_mapping = false); - string get_enum_class_name(t_type* type); - - static string correct_function_name_for_async(string const& function_name) +/** +* \brief Search and replace "_args" substring in struct name if exist (for C# class naming) +* \param struct_name +* \return Modified struct name ("Struct_args" -> "StructArgs") or original name +*/ +static string check_and_correct_struct_name(const string& struct_name) +{ + string args_end = "_args"; + size_t i = struct_name.find(args_end); + if (i != string::npos) { - string const async_end = "Async"; - size_t i = function_name.find(async_end); - if (i != 
string::npos) - { - return function_name + async_end; - } - - return function_name; + string new_struct_name = struct_name; + new_struct_name.replace(i, args_end.length(), "Args"); + return new_struct_name; } - /** - * \brief Search and replace "_args" substring in struct name if exist (for C# class naming) - * \param struct_name - * \return Modified struct name ("Struct_args" -> "StructArgs") or original name - */ - static string check_and_correct_struct_name(const string& struct_name) + string result_end = "_result"; + size_t j = struct_name.find(result_end); + if (j != string::npos) { - string args_end = "_args"; - size_t i = struct_name.find(args_end); - if (i != string::npos) - { - string new_struct_name = struct_name; - new_struct_name.replace(i, args_end.length(), "Args"); - return new_struct_name; - } - - string result_end = "_result"; - size_t j = struct_name.find(result_end); - if (j != string::npos) - { - string new_struct_name = struct_name; - new_struct_name.replace(j, result_end.length(), "Result"); - return new_struct_name; - } - - return struct_name; + string new_struct_name = struct_name; + new_struct_name.replace(j, result_end.length(), "Result"); + return new_struct_name; } - static bool field_has_default(t_field* tfield) { return tfield->get_value() != NULL; } + return struct_name; +} - static bool field_is_required(t_field* tfield) { return tfield->get_req() == t_field::T_REQUIRED; } +static bool field_has_default(t_field* tfield) { return tfield->get_value() != NULL; } - static bool type_can_be_null(t_type* ttype) +static bool field_is_required(t_field* tfield) { return tfield->get_req() == t_field::T_REQUIRED; } + +static bool type_can_be_null(t_type* ttype) +{ + while (ttype->is_typedef()) { - while (ttype->is_typedef()) - { - ttype = static_cast(ttype)->get_type(); - } - - return ttype->is_container() || ttype->is_struct() || ttype->is_xception() || ttype->is_string(); + ttype = static_cast(ttype)->get_type(); } + return 
ttype->is_container() || ttype->is_struct() || ttype->is_xception() || ttype->is_string(); +} -private: - string namespace_name_; - string namespace_dir_; +bool t_netcore_generator::is_wcf_enabled() const { return wcf_; } - bool nullable_; - bool union_; - bool hashcode_; - bool serialize_; - bool wcf_; +bool t_netcore_generator::is_nullable_enabled() const { return nullable_; } - string wcf_namespace_; - map netcore_keywords; - vector member_mapping_scopes; +bool t_netcore_generator::is_hashcode_enabled() const { return hashcode_; } - void init_keywords(); - string normalize_name(string name); - string make_valid_csharp_identifier(string const& fromName); - void prepare_member_name_mapping(t_struct* tstruct); - void prepare_member_name_mapping(void* scope, const vector& members, const string& structname); - void cleanup_member_name_mapping(void* scope); - string get_mapped_member_name(string oldname); -}; +bool t_netcore_generator::is_serialize_enabled() const { return serialize_; } + +bool t_netcore_generator::is_union_enabled() const { return union_; } + +map t_netcore_generator::get_keywords_list() const +{ + return netcore_keywords; +} void t_netcore_generator::init_generator() { @@ -414,6 +328,8 @@ void t_netcore_generator::init_keywords() netcore_keywords["var"] = 1; netcore_keywords["where"] = 1; netcore_keywords["yield"] = 1; + + netcore_keywords["when"] = 1; } void t_netcore_generator::start_netcore_namespace(ofstream& out) @@ -479,35 +395,40 @@ void t_netcore_generator::generate_typedef(t_typedef* ttypedef) void t_netcore_generator::generate_enum(t_enum* tenum) { - int ic = indent_count(); - + int ic = indent_count(); string f_enum_name = namespace_dir_ + "/" + tenum->get_name() + ".cs"; ofstream f_enum; f_enum.open(f_enum_name.c_str()); - f_enum << autogen_comment() << endl; - start_netcore_namespace(f_enum); - generate_netcore_doc(f_enum, tenum); + generate_enum(f_enum, tenum); - f_enum << indent() << "public enum " << tenum->get_name() << endl; - 
scope_up(f_enum); - - vector constants = tenum->get_constants(); - vector::iterator c_iter; - - for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) - { - generate_netcore_doc(f_enum, *c_iter); - int value = (*c_iter)->get_value(); - f_enum << indent() << (*c_iter)->get_name() << " = " << value << "," << endl; - } - - scope_down(f_enum); - end_netcore_namespace(f_enum); f_enum.close(); + indent_validate(ic, "generate_enum"); +} - indent_validate(ic, "generate_enum"); +void t_netcore_generator::generate_enum(ofstream& out, t_enum* tenum) +{ + out << autogen_comment() << endl; + + start_netcore_namespace(out); + generate_netcore_doc(out, tenum); + + out << indent() << "public enum " << tenum->get_name() << endl; + scope_up(out); + + vector constants = tenum->get_constants(); + vector::iterator c_iter; + + for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) + { + generate_netcore_doc(out, *c_iter); + int value = (*c_iter)->get_value(); + out << indent() << (*c_iter)->get_name() << " = " << value << "," << endl; + } + + scope_down(out); + end_netcore_namespace(out); } void t_netcore_generator::generate_consts(vector consts) @@ -521,20 +442,32 @@ void t_netcore_generator::generate_consts(vector consts) ofstream f_consts; f_consts.open(f_consts_name.c_str()); - f_consts << autogen_comment() << netcore_type_usings() << endl; + generate_consts(f_consts, consts); - start_netcore_namespace(f_consts); + f_consts.close(); +} - f_consts << indent() << "public static class " << make_valid_csharp_identifier(program_name_) << "Constants" << endl; +void t_netcore_generator::generate_consts(ofstream& out, vector consts) +{ + if (consts.empty()) + { + return; + } - scope_up(f_consts); + out << autogen_comment() << netcore_type_usings() << endl; + + start_netcore_namespace(out); + + out << indent() << "public static class " << make_valid_csharp_identifier(program_name_) << "Constants" << endl; + + scope_up(out); vector::iterator c_iter; bool 
need_static_constructor = false; for (c_iter = consts.begin(); c_iter != consts.end(); ++c_iter) { - generate_netcore_doc(f_consts, *c_iter); - if (print_const_value(f_consts, (*c_iter)->get_name(), (*c_iter)->get_type(), (*c_iter)->get_value(), false)) + generate_netcore_doc(out, *c_iter); + if (print_const_value(out, (*c_iter)->get_name(), (*c_iter)->get_type(), (*c_iter)->get_value(), false)) { need_static_constructor = true; } @@ -542,12 +475,11 @@ void t_netcore_generator::generate_consts(vector consts) if (need_static_constructor) { - print_const_constructor(f_consts, consts); + print_const_constructor(out, consts); } - scope_down(f_consts); - end_netcore_namespace(f_consts); - f_consts.close(); + scope_down(out); + end_netcore_namespace(out); } void t_netcore_generator::print_const_def_value(ofstream& out, string name, t_type* type, t_const_value* value) @@ -555,15 +487,15 @@ void t_netcore_generator::print_const_def_value(ofstream& out, string name, t_ty if (type->is_struct() || type->is_xception()) { const vector& fields = static_cast(type)->get_members(); - const map& val = value->get_map(); + const map& val = value->get_map(); vector::const_iterator f_iter; - map::const_iterator v_iter; + map::const_iterator v_iter; prepare_member_name_mapping(static_cast(type)); for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_field* field = NULL; - + for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { if ((*f_iter)->get_name() == v_iter->first->get_string()) @@ -578,7 +510,7 @@ void t_netcore_generator::print_const_def_value(ofstream& out, string name, t_ty } t_type* field_type = field->get_type(); - + string val = render_const_value(out, name, field_type, v_iter->second); out << indent() << name << "." 
<< prop_name(field) << " = " << val << ";" << endl; } @@ -589,8 +521,8 @@ void t_netcore_generator::print_const_def_value(ofstream& out, string name, t_ty { t_type* ktype = static_cast(type)->get_key_type(); t_type* vtype = static_cast(type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { string key = render_const_value(out, name, ktype, v_iter->first); @@ -654,7 +586,7 @@ bool t_netcore_generator::print_const_value(ofstream& out, string name, t_type* if (type->is_base_type()) { string v2 = render_const_value(out, name, type, value); - out << name << " = " << v2 << ";" << endl; + out << normalize_name(name) << " = " << v2 << ";" << endl; need_static_construction = false; } else if (type->is_enum()) @@ -791,7 +723,7 @@ void t_netcore_generator::generate_netcore_struct_definition(ofstream& out, t_st out << indent() << "public " << (is_final ? 
"sealed " : "") << "partial class " << sharp_struct_name << " : "; - if (is_exception) + if (is_exception) { out << "TException, "; } @@ -854,7 +786,7 @@ void t_netcore_generator::generate_netcore_struct_definition(ofstream& out, t_st out << indent() << "public struct Isset" << endl << indent() << "{" << endl; indent_up(); - + for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) { bool is_required = field_is_required((*m_iter)); @@ -980,7 +912,7 @@ void t_netcore_generator::generate_netcore_struct_definition(ofstream& out, t_st generate_netcore_struct_hashcode(out, tstruct); } generate_netcore_struct_tostring(out, tstruct); - + indent_down(); out << indent() << "}" << endl << endl; @@ -1036,7 +968,7 @@ void t_netcore_generator::generate_netcore_struct_reader(ofstream& out, t_struct << indent() << "try" << endl << indent() << "{" << endl; indent_up(); - + const vector& fields = tstruct->get_members(); vector::const_iterator f_iter; @@ -1049,7 +981,7 @@ void t_netcore_generator::generate_netcore_struct_reader(ofstream& out, t_struct } } - out << indent() << "TField field;" << endl + out << indent() << "TField field;" << endl << indent() << "await iprot.ReadStructBeginAsync(cancellationToken);" << endl << indent() << "while (true)" << endl << indent() << "{" << endl; @@ -1064,7 +996,7 @@ void t_netcore_generator::generate_netcore_struct_reader(ofstream& out, t_struct << indent() << "switch (field.ID)" << endl << indent() << "{" << endl; indent_up(); - + for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { bool is_required = field_is_required(*f_iter); @@ -1118,7 +1050,7 @@ void t_netcore_generator::generate_netcore_struct_reader(ofstream& out, t_struct out << indent() << "}" << endl; } } - + indent_down(); out << indent() << "}" << endl; out << indent() << "finally" << endl @@ -1215,7 +1147,7 @@ void t_netcore_generator::generate_netcore_struct_result_writer(ofstream& out, t indent_up(); out << indent() << "oprot.IncrementRecursionDepth();" 
<< endl - << indent() << "try" << endl + << indent() << "try" << endl << indent() << "{" << endl; indent_up(); @@ -1401,7 +1333,7 @@ void t_netcore_generator::generate_netcore_union(t_struct* tunion) generate_netcore_union_definition(f_union, tunion); f_union.close(); - + indent_validate(ic, "generate_netcore_union."); } @@ -1414,7 +1346,7 @@ void t_netcore_generator::generate_netcore_union_definition(ofstream& out, t_str << indent() << "{" << endl; indent_up(); - out << indent() << "public abstract void Write(TProtocol protocol);" << endl + out << indent() << "public abstract Task WriteAsync(TProtocol tProtocol, CancellationToken cancellationToken);" << endl << indent() << "public readonly bool Isset;" << endl << indent() << "public abstract object Data { get; }" << endl << indent() << "protected " << tunion->get_name() << "(bool isset)" << endl @@ -1431,7 +1363,7 @@ void t_netcore_generator::generate_netcore_union_definition(ofstream& out, t_str out << indent() << "public override object Data { get { return null; } }" << endl << indent() << "public ___undefined() : base(false) {}" << endl << endl; - out << indent() << "public override void Write(TProtocol protocol)" << endl + out << indent() << "public override Task WriteAsync(TProtocol oprot, CancellationToken cancellationToken)" << endl << indent() << "{" << endl; indent_up(); out << indent() << "throw new TProtocolException( TProtocolException.INVALID_DATA, \"Cannot persist an union type which is not set.\");" << endl; @@ -1491,8 +1423,8 @@ void t_netcore_generator::generate_netcore_union_class(ofstream& out, t_struct* generate_serialize_field(out, tfield, "_data", true, true); out << indent() << "await oprot.WriteFieldEndAsync(cancellationToken);" << endl - << indent() << "await oprot.WriteFieldStop(cancellationToken);" << endl - << indent() << "await oprot.WriteStructEnd(cancellationToken);" << endl; + << indent() << "await oprot.WriteFieldStopAsync(cancellationToken);" << endl + << indent() << "await 
oprot.WriteStructEndAsync(cancellationToken);" << endl; indent_down(); out << indent() << "}" << endl << indent() << "finally" << endl @@ -1511,7 +1443,7 @@ void t_netcore_generator::generate_netcore_struct_equals(ofstream& out, t_struct out << indent() << "public override bool Equals(object that)" << endl << indent() << "{" << endl; indent_up(); - out << indent() << "var other = that as " << type_name(tstruct) << ";" << endl + out << indent() << "var other = that as " << check_and_correct_struct_name(normalize_name(tstruct->get_name())) << ";" << endl << indent() << "if (other == null) return false;" << endl << indent() << "if (ReferenceEquals(this, other)) return true;" << endl; @@ -1788,7 +1720,7 @@ void t_netcore_generator::generate_service_client(ofstream& out, t_service* tser << indent() << "throw x;" << endl; indent_down(); - out << indent() << "}" << endl + out << indent() << "}" << endl << endl << indent() << "var result = new " << resultname << "();" << endl << indent() << "await result.ReadAsync(InputProtocol, cancellationToken);" << endl @@ -1809,7 +1741,7 @@ void t_netcore_generator::generate_service_client(ofstream& out, t_service* tser } else { - out << indent() << "if (result.Success.HasValue)" << endl + out << indent() << "if (result.Success.HasValue)" << endl << indent() << "{" << endl; indent_up(); out << indent() << "return result.Success.Value;" << endl; @@ -1895,7 +1827,7 @@ void t_netcore_generator::generate_service_server(ofstream& out, t_service* tser indent_up(); - out << indent() << "private IAsync _iAsync;" << endl + out << indent() << "private IAsync _iAsync;" << endl << endl << indent() << "public AsyncProcessor(IAsync iAsync)"; @@ -1907,7 +1839,7 @@ void t_netcore_generator::generate_service_server(ofstream& out, t_service* tser out << endl << indent() << "{" << endl; indent_up(); - + out << indent() << "if (iAsync == null) throw new ArgumentNullException(nameof(iAsync));" << endl << endl << indent() << "_iAsync = iAsync;" << endl; @@ 
-2033,7 +1965,7 @@ void t_netcore_generator::generate_function_helpers(ofstream& out, t_function* t void t_netcore_generator::generate_process_function_async(ofstream& out, t_service* tservice, t_function* tfunction) { (void)tservice; - out << indent() << "public async Task " << tfunction->get_name() + out << indent() << "public async Task " << tfunction->get_name() << "_ProcessAsync(int seqid, TProtocol iprot, TProtocol oprot, CancellationToken cancellationToken)" << endl << indent() << "{" << endl; indent_up(); @@ -2097,7 +2029,7 @@ void t_netcore_generator::generate_process_function_async(ofstream& out, t_servi } cleanup_member_name_mapping(arg_struct); - + if (!first) { out << ", "; @@ -2112,7 +2044,7 @@ void t_netcore_generator::generate_process_function_async(ofstream& out, t_servi { indent_down(); out << indent() << "}" << endl; - + for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) { out << indent() << "catch (" << type_name((*x_iter)->get_type(), false, false) << " " << (*x_iter)->get_name() << ")" << endl @@ -2130,7 +2062,7 @@ void t_netcore_generator::generate_process_function_async(ofstream& out, t_servi if (!tfunction->is_oneway()) { - out << indent() << "await oprot.WriteMessageBeginAsync(new TMessage(\"" + out << indent() << "await oprot.WriteMessageBeginAsync(new TMessage(\"" << correct_function_name_for_async(tfunction->get_name()) << "\", TMessageType.Reply, seqid), cancellationToken); " << endl << indent() << "await result.WriteAsync(oprot, cancellationToken);" << endl; } @@ -2178,7 +2110,7 @@ void t_netcore_generator::generate_netcore_union_reader(ofstream& out, t_struct* const vector& fields = tunion->get_members(); vector::const_iterator f_iter; - out << indent() << "public static " << tunion->get_name() << " Read(TProtocol iprot)" << endl; + out << indent() << "public static async Task<" << tunion->get_name() << "> ReadAsync(TProtocol iprot, CancellationToken cancellationToken)" << endl; scope_up(out); out << indent() << 
"iprot.IncrementRecursionDepth();" << endl; @@ -2186,12 +2118,12 @@ void t_netcore_generator::generate_netcore_union_reader(ofstream& out, t_struct* scope_up(out); out << indent() << tunion->get_name() << " retval;" << endl; - out << indent() << "iprot.ReadStructBegin();" << endl; - out << indent() << "TField field = iprot.ReadFieldBegin();" << endl; + out << indent() << "await iprot.ReadStructBeginAsync(cancellationToken);" << endl; + out << indent() << "TField field = await iprot.ReadFieldBeginAsync(cancellationToken);" << endl; // we cannot have the first field be a stop -- we must have a single field defined out << indent() << "if (field.Type == TType.Stop)" << endl; scope_up(out); - out << indent() << "iprot.ReadFieldEnd();" << endl; + out << indent() << "await iprot.ReadFieldEndAsync(cancellationToken);" << endl; out << indent() << "retval = new ___undefined();" << endl; scope_down(out); out << indent() << "else" << endl; @@ -2211,7 +2143,7 @@ void t_netcore_generator::generate_netcore_union_reader(ofstream& out, t_struct* out << indent() << "retval = new " << (*f_iter)->get_name() << "(temp);" << endl; indent_down(); - out << indent() << "} else { " << endl << indent() << " TProtocolUtil.Skip(iprot, field.Type);" + out << indent() << "} else { " << endl << indent() << " await TProtocolUtil.SkipAsync(iprot, field.Type, cancellationToken);" << endl << indent() << " retval = new ___undefined();" << endl << indent() << "}" << endl << indent() << "break;" << endl; indent_down(); @@ -2219,23 +2151,23 @@ void t_netcore_generator::generate_netcore_union_reader(ofstream& out, t_struct* out << indent() << "default: " << endl; indent_up(); - out << indent() << "TProtocolUtil.Skip(iprot, field.Type);" << endl << indent() + out << indent() << "await TProtocolUtil.SkipAsync(iprot, field.Type, cancellationToken);" << endl << indent() << "retval = new ___undefined();" << endl; out << indent() << "break;" << endl; indent_down(); scope_down(out); - out << indent() << 
"iprot.ReadFieldEnd();" << endl; + out << indent() << "await iprot.ReadFieldEndAsync(cancellationToken);" << endl; - out << indent() << "if (iprot.ReadFieldBegin().Type != TType.Stop)" << endl; + out << indent() << "if ((await iprot.ReadFieldBeginAsync(cancellationToken)).Type != TType.Stop)" << endl; scope_up(out); out << indent() << "throw new TProtocolException(TProtocolException.INVALID_DATA);" << endl; scope_down(out); // end of else for TStop scope_down(out); - out << indent() << "iprot.ReadStructEnd();" << endl; + out << indent() << "await iprot.ReadStructEndAsync(cancellationToken);" << endl; out << indent() << "return retval;" << endl; indent_down(); @@ -2342,7 +2274,7 @@ void t_netcore_generator::generate_deserialize_struct(ofstream& out, t_struct* t } else { - out << indent() << prefix << " = new " << type_name(tstruct) << "();" << endl + out << indent() << prefix << " = new " << type_name(tstruct) << "();" << endl << indent() << "await " << prefix << ".ReadAsync(iprot, cancellationToken);" << endl; } } @@ -2788,9 +2720,7 @@ void t_netcore_generator::prepare_member_name_mapping(t_struct* tstruct) void t_netcore_generator::prepare_member_name_mapping(void* scope, const vector& members, const string& structname) { // begin new scope - member_mapping_scope dummy; - dummy.scope_member = 0; - member_mapping_scopes.push_back(dummy); + member_mapping_scopes.push_back(member_mapping_scope()); member_mapping_scope& active = member_mapping_scopes.back(); active.scope_member = scope; @@ -3144,17 +3074,17 @@ void t_netcore_generator::docstring_comment(ofstream& out, const string& comment stringstream docs(contents, std::ios_base::in); - while (!(docs.eof() || docs.fail())) + while (!(docs.eof() || docs.fail())) { char line[1024]; docs.getline(line, 1024); // Just prnt a newline when the line & prefix are empty. 
- if (strlen(line) == 0 && line_prefix == "" && !docs.eof()) + if (strlen(line) == 0 && line_prefix == "" && !docs.eof()) { out << endl; } - else if (strlen(line) > 0 || !docs.eof()) + else if (strlen(line) > 0 || !docs.eof()) { // skip the empty last line out << indent() << line_prefix << line << endl; } diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.h b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.h new file mode 100644 index 000000000..8d52307f1 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_netcore_generator.h @@ -0,0 +1,137 @@ +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "thrift/platform.h" +#include "thrift/generate/t_oop_generator.h" + +using std::map; +using std::ofstream; +using std::ostringstream; +using std::string; +using std::stringstream; +using std::vector; + +static const string endl = "\n"; // avoid ostream << std::endl flushes + +class t_netcore_generator : public t_oop_generator +{ + + struct member_mapping_scope + { + public: + member_mapping_scope() : scope_member(0) { } + void* scope_member; + map mapping_table; + }; + +public: + t_netcore_generator(t_program* program, const map& parsed_options, const string& option_string); + + bool is_wcf_enabled() const; + bool is_nullable_enabled() const; + bool is_hashcode_enabled() const; + bool is_serialize_enabled() const; + bool is_union_enabled() const; + map get_keywords_list() const; + + // overrides + void init_generator(); + void close_generator(); + void generate_consts(vector consts); + void generate_consts(ofstream& out, vector consts); + void generate_typedef(t_typedef* ttypedef); + void generate_enum(t_enum* tenum); + void generate_enum(ofstream& out, t_enum* tenum); + void generate_struct(t_struct* tstruct); + void generate_xception(t_struct* txception); + void generate_service(t_service* tservice); 
+ + void generate_property(ofstream& out, t_field* tfield, bool isPublic, bool generateIsset); + void generate_netcore_property(ofstream& out, t_field* tfield, bool isPublic, bool includeIsset = true, string fieldPrefix = ""); + bool print_const_value(ofstream& out, string name, t_type* type, t_const_value* value, bool in_static, bool defval = false, bool needtype = false); + string render_const_value(ofstream& out, string name, t_type* type, t_const_value* value); + void print_const_constructor(ofstream& out, vector consts); + void print_const_def_value(ofstream& out, string name, t_type* type, t_const_value* value); + void generate_netcore_struct(t_struct* tstruct, bool is_exception); + void generate_netcore_union(t_struct* tunion); + void generate_netcore_struct_definition(ofstream& out, t_struct* tstruct, bool is_xception = false, bool in_class = false, bool is_result = false); + void generate_netcore_union_definition(ofstream& out, t_struct* tunion); + void generate_netcore_union_class(ofstream& out, t_struct* tunion, t_field* tfield); + void generate_netcore_wcffault(ofstream& out, t_struct* tstruct); + void generate_netcore_struct_reader(ofstream& out, t_struct* tstruct); + void generate_netcore_struct_result_writer(ofstream& out, t_struct* tstruct); + void generate_netcore_struct_writer(ofstream& out, t_struct* tstruct); + void generate_netcore_struct_tostring(ofstream& out, t_struct* tstruct); + void generate_netcore_struct_equals(ofstream& out, t_struct* tstruct); + void generate_netcore_struct_hashcode(ofstream& out, t_struct* tstruct); + void generate_netcore_union_reader(ofstream& out, t_struct* tunion); + void generate_function_helpers(ofstream& out, t_function* tfunction); + void generate_service_interface(ofstream& out, t_service* tservice); + void generate_service_helpers(ofstream& out, t_service* tservice); + void generate_service_client(ofstream& out, t_service* tservice); + void generate_service_server(ofstream& out, t_service* tservice); + void 
generate_process_function_async(ofstream& out, t_service* tservice, t_function* function); + void generate_deserialize_field(ofstream& out, t_field* tfield, string prefix = "", bool is_propertyless = false); + void generate_deserialize_struct(ofstream& out, t_struct* tstruct, string prefix = ""); + void generate_deserialize_container(ofstream& out, t_type* ttype, string prefix = ""); + void generate_deserialize_set_element(ofstream& out, t_set* tset, string prefix = ""); + void generate_deserialize_map_element(ofstream& out, t_map* tmap, string prefix = ""); + void generate_deserialize_list_element(ofstream& out, t_list* list, string prefix = ""); + void generate_serialize_field(ofstream& out, t_field* tfield, string prefix = "", bool is_element = false, bool is_propertyless = false); + void generate_serialize_struct(ofstream& out, t_struct* tstruct, string prefix = ""); + void generate_serialize_container(ofstream& out, t_type* ttype, string prefix = ""); + void generate_serialize_map_element(ofstream& out, t_map* tmap, string iter, string map); + void generate_serialize_set_element(ofstream& out, t_set* tmap, string iter); + void generate_serialize_list_element(ofstream& out, t_list* tlist, string iter); + void generate_netcore_doc(ofstream& out, t_field* field); + void generate_netcore_doc(ofstream& out, t_doc* tdoc); + void generate_netcore_doc(ofstream& out, t_function* tdoc); + void generate_netcore_docstring_comment(ofstream& out, string contents); + void docstring_comment(ofstream& out, const string& comment_start, const string& line_prefix, const string& contents, const string& comment_end); + void start_netcore_namespace(ofstream& out); + void end_netcore_namespace(ofstream& out); + + string netcore_type_usings() const; + string netcore_thrift_usings() const; + + string type_name(t_type* ttype, bool in_countainer = false, bool in_init = false, bool in_param = false, bool is_required = false); + string base_type_name(t_base_type* tbase, bool in_container = 
false, bool in_param = false, bool is_required = false); + string declare_field(t_field* tfield, bool init = false, string prefix = ""); + string function_signature_async(t_function* tfunction, string prefix = ""); + string function_signature(t_function* tfunction, string prefix = ""); + string argument_list(t_struct* tstruct); + string type_to_enum(t_type* ttype); + string prop_name(t_field* tfield, bool suppress_mapping = false); + string get_enum_class_name(t_type* type); + +private: + string namespace_name_; + string namespace_dir_; + + bool nullable_; + bool union_; + bool hashcode_; + bool serialize_; + bool wcf_; + + string wcf_namespace_; + map netcore_keywords; + vector member_mapping_scopes; + + void init_keywords(); + string normalize_name(string name); + string make_valid_csharp_identifier(string const& fromName); + void prepare_member_name_mapping(t_struct* tstruct); + void prepare_member_name_mapping(void* scope, const vector& members, const string& structname); + void cleanup_member_name_mapping(void* scope); + string get_mapped_member_name(string oldname); +}; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_ocaml_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_ocaml_generator.cc index 594219ae5..2bf85d1c2 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_ocaml_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_ocaml_generator.cc @@ -401,8 +401,8 @@ string t_ocaml_generator::render_const_value(t_type* type, t_const_value* value) indent_up(); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ 
-425,8 +425,8 @@ string t_ocaml_generator::render_const_value(t_type* type, t_const_value* value) } else if (type->is_map()) { t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; string hm = tmp("_hm"); out << endl; indent_up(); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_perl_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_perl_generator.cc index 0c05cda04..76d343e89 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_perl_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_perl_generator.cc @@ -359,8 +359,8 @@ string t_perl_generator::render_const_value(t_type* type, t_const_value* value) const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -385,8 +385,8 @@ string t_perl_generator::render_const_value(t_type* type, t_const_value* value) out << "{" << endl; indent_up(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { indent(out) << render_const_value(ktype, v_iter->first); out << " => "; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_php_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_php_generator.cc index 11771c216..6ab6bf8eb 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_php_generator.cc +++ 
b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_php_generator.cc @@ -62,7 +62,7 @@ public: validate_ = false; json_serializable_ = false; nsglobal_ = ""; // by default global namespace is empty - psr4_ = false; + classmap_ = false; for (iter = parsed_options.begin(); iter != parsed_options.end(); ++iter) { if (iter->first.compare("inlined") == 0) { binary_inline_ = true; @@ -78,8 +78,14 @@ public: json_serializable_ = true; } else if (iter->first.compare("nsglobal") == 0) { nsglobal_ = iter->second; + } else if (iter->first.compare("classmap") == 0) { + classmap_ = true; } else if (iter->first.compare("psr4") == 0) { - psr4_ = true; + if(classmap_){ + throw "psr4 and classmap are mutually exclusive."; + } else { + pwarning(0, "psr4 is default option! needn't add psr4 option!\n"); + } } else { throw "unknown option php:" + iter->first; } @@ -93,6 +99,10 @@ public: escape_['$'] = "\\$"; } + virtual std::string indent_str() const { + return " "; + } + static bool is_valid_namespace(const std::string& sub_namespace); /** @@ -372,9 +382,9 @@ private: bool oop_; /** - * Whether to hold each class in separate file to allow PSR4-autoloading + * Whether to generate old-style PHP file to use classmap autoloading */ - bool psr4_; + bool classmap_; /** * Whether to generate validator code @@ -415,8 +425,8 @@ void t_php_generator::init_generator() { MKDIR(package_dir_.c_str()); } - // Prepare output file for all the types in non-psr4 mode - if (!psr4_) { + // Prepare output file for all the types in classmap mode + if (classmap_) { // Make output file string f_types_name = package_dir_ + "Types.php"; f_types_.open(f_types_name.c_str()); @@ -442,16 +452,15 @@ string t_php_generator::php_includes() { "use stdClass;\n"; } - return includes + "\n"; + return includes; } /** * Close up (or down) some filez. 
*/ void t_php_generator::close_generator() { - if (!psr4_) { + if (classmap_) { // Close types file - f_types_ << endl; f_types_.close(); } } @@ -471,7 +480,8 @@ void t_php_generator::generate_typedef(t_typedef* ttypedef) { void t_php_generator::generate_service_header(t_service* tservice, std::ofstream& file) { file << "get_program()).empty()) { - file << "namespace " << php_namespace_suffix(tservice->get_program()) << ";" << endl; + file << "namespace " << php_namespace_suffix(tservice->get_program()) << ";" << endl + << endl; } file << autogen_comment() << php_includes(); @@ -484,7 +494,8 @@ void t_php_generator::generate_service_header(t_service* tservice, std::ofstream void t_php_generator::generate_program_header(std::ofstream& file) { file << "get_name() + ".php"; f_enum.open(f_enum_name.c_str()); generate_program_header(f_enum); @@ -512,26 +523,31 @@ void t_php_generator::generate_enum(t_enum* tenum) { // code but you can't do things like an 'extract' on it, which is a bit of // a downer. 
generate_php_doc(f_enum, tenum); - f_enum << "final class " << tenum->get_name() << " {" << endl; + f_enum << "final class " << tenum->get_name() << endl + << "{" << endl; indent_up(); for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) { int value = (*c_iter)->get_value(); generate_php_doc(f_enum, *c_iter); - indent(f_enum) << "const " << (*c_iter)->get_name() << " = " << value << ";" << endl; + indent(f_enum) << "const " << (*c_iter)->get_name() << " = " << value << ";" << endl + << endl; } indent(f_enum) << "static public $__names = array(" << endl; + + indent_up(); for (c_iter = constants.begin(); c_iter != constants.end(); ++c_iter) { int value = (*c_iter)->get_value(); - indent(f_enum) << " " << value << " => '" << (*c_iter)->get_name() << "'," << endl; + indent(f_enum) << value << " => '" << (*c_iter)->get_name() << "'," << endl; } + indent_down(); indent(f_enum) << ");" << endl; indent_down(); f_enum << "}" << endl << endl; - if (psr4_) { + if (!classmap_) { f_enum.close(); } } @@ -548,12 +564,13 @@ void t_php_generator::generate_consts(vector consts) { if (consts.size() > 0) { std::ofstream& f_consts = f_types_; - if (psr4_) { + if (!classmap_) { string f_consts_name = package_dir_ + "Constant.php"; f_consts.open(f_consts_name.c_str()); generate_program_header(f_consts); } - f_consts << "final class Constant extends \\Thrift\\Type\\TConstant {" << endl; + f_consts << "final class Constant extends \\Thrift\\Type\\TConstant"<< endl + << "{" << endl; indent_up(); @@ -570,7 +587,8 @@ void t_php_generator::generate_consts(vector consts) { f_consts << endl; - indent(f_consts) << "static protected function init_" << name << "() {" << endl; + f_consts << indent() << "protected static function init_" << name << "()" < consts) { } indent_down(); - f_consts << "}" << endl << endl; - if (psr4_) { + f_consts << "}" << endl; + if (!classmap_) { f_consts.close(); } } @@ -630,8 +648,8 @@ string t_php_generator::render_const_value(t_type* type, 
t_const_value* value) { indent_up(); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -655,8 +673,8 @@ string t_php_generator::render_const_value(t_type* type, t_const_value* value) { t_type* vtype = ((t_map*)type)->get_val_type(); out << "array(" << endl; indent_up(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { out << indent(); out << render_const_value(ktype, v_iter->first); @@ -713,13 +731,13 @@ void t_php_generator::generate_xception(t_struct* txception) { */ void t_php_generator::generate_php_struct(t_struct* tstruct, bool is_exception) { std::ofstream& f_struct = f_types_; - if (psr4_) { + if (!classmap_) { string f_struct_name = package_dir_ + tstruct->get_name() + ".php"; f_struct.open(f_struct_name.c_str()); generate_program_header(f_struct); } generate_php_struct_definition(f_struct, tstruct, is_exception); - if (psr4_) { + if (!classmap_) { f_struct.close(); } } @@ -771,7 +789,7 @@ void t_php_generator::generate_php_type_spec(ofstream& out, t_type* t) { * type information to generalize serialization routines. 
*/ void t_php_generator::generate_php_struct_spec(ofstream& out, t_struct* tstruct) { - indent(out) << "static $_TSPEC = array(" << endl; + indent(out) << "static public $_TSPEC = array(" << endl; indent_up(); const vector& members = tstruct->get_members(); @@ -783,12 +801,12 @@ void t_php_generator::generate_php_struct_spec(ofstream& out, t_struct* tstruct) out << indent() << "'var' => '" << (*m_iter)->get_name() << "'," << endl; out << indent() << "'isRequired' => " << ((*m_iter)->get_req() == t_field::T_REQUIRED ? "true" : "false") << "," << endl; generate_php_type_spec(out, t); - indent(out) << ")," << endl; indent_down(); + indent(out) << ")," << endl; } indent_down(); - indent(out) << " );" << endl << endl; + indent(out) << ");" << endl << endl; } /** @@ -817,10 +835,11 @@ void t_php_generator::generate_php_struct_definition(ofstream& out, if (json_serializable_) { out << " implements JsonSerializable"; } - out << " {" << endl; + out << endl + << "{" << endl; indent_up(); - out << indent() << "static $isValidate = " << (validate_ ? "true" : "false") << ";" << endl << endl; + out << indent() << "static public $isValidate = " << (validate_ ? "true" : "false") << ";" << endl << endl; generate_php_struct_spec(out, tstruct); @@ -837,8 +856,9 @@ void t_php_generator::generate_php_struct_definition(ofstream& out, out << endl; // Generate constructor from array - string param = (members.size() > 0) ? "$vals=null" : ""; - out << indent() << "public function __construct(" << param << ") {" << endl; + string param = (members.size() > 0) ? 
"$vals = null" : ""; + out << indent() << "public function __construct(" << param << ")"<< endl + << indent() << "{" << endl; indent_up(); if (members.size() > 0) { @@ -855,9 +875,14 @@ void t_php_generator::generate_php_struct_definition(ofstream& out, out << indent() << "parent::__construct(self::$_TSPEC, $vals);" << endl; } else { for (m_iter = members.begin(); m_iter != members.end(); ++m_iter) { - out << indent() << "if (isset($vals['" << (*m_iter)->get_name() << "'])) {" << endl - << indent() << " $this->" << (*m_iter)->get_name() << " = $vals['" - << (*m_iter)->get_name() << "'];" << endl << indent() << "}" << endl; + out << indent() << "if (isset($vals['" << (*m_iter)->get_name() << "'])) {" << endl; + + indent_up(); + out << indent() << "$this->" << (*m_iter)->get_name() << " = $vals['" + << (*m_iter)->get_name() << "'];" << endl; + + indent_down(); + out << indent() << "}" << endl; } } indent_down(); @@ -866,23 +891,34 @@ void t_php_generator::generate_php_struct_definition(ofstream& out, scope_down(out); out << endl; - out << indent() << "public function getName() {" << endl << indent() << " return '" - << tstruct->get_name() << "';" << endl << indent() << "}" << endl << endl; + out << indent() << "public function getName()" << endl + << indent() << "{" << endl; + indent_up(); + out << indent() << "return '" << tstruct->get_name() << "';" << endl; + + indent_down(); + out << indent() << "}" << endl << endl; + + out << endl; generate_php_struct_reader(out, tstruct, is_result); + out << endl; generate_php_struct_writer(out, tstruct, is_result); if (needs_php_read_validator(tstruct, is_result)) { + out << endl; generate_php_struct_read_validator(out, tstruct); } if (needs_php_write_validator(tstruct, is_result)) { + out << endl; generate_php_struct_write_validator(out, tstruct); } if (json_serializable_) { + out << endl; generate_php_struct_json_serialize(out, tstruct, is_result); } indent_down(); - out << indent() << "}" << endl << endl; + out << indent() 
<< "}" << endl; } /** @@ -919,9 +955,9 @@ void t_php_generator::generate_php_struct_reader(ofstream& out, t_struct* tstruc } // Loop over reading in fields - indent(out) << "while (true)" << endl; + indent(out) << "while (true) {" << endl; - scope_up(out); + indent_up(); // Read beginning field marker if (binary_inline_) { @@ -943,9 +979,9 @@ void t_php_generator::generate_php_struct_reader(ofstream& out, t_struct* tstruc } // Switch statement on the field we are reading - indent(out) << "switch ($fid)" << endl; + indent(out) << "switch ($fid) {" << endl; - scope_up(out); + indent_up(); // Generate deserialization code for known cases for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -956,25 +992,31 @@ void t_php_generator::generate_php_struct_reader(ofstream& out, t_struct* tstruc generate_deserialize_field(out, *f_iter, "this->"); indent_down(); out << indent() << "} else {" << endl; + + indent_up(); if (binary_inline_) { - indent(out) << " $xfer += " - << "TProtocol::skipBinary($input, $ftype);" << endl; + indent(out) << "$xfer += TProtocol::skipBinary($input, $ftype);" << endl; } else { - indent(out) << " $xfer += $input->skip($ftype);" << endl; + indent(out) << "$xfer += $input->skip($ftype);" << endl; } + + indent_down(); out << indent() << "}" << endl << indent() << "break;" << endl; indent_down(); } // In the default case we skip the field indent(out) << "default:" << endl; + + indent_up(); if (binary_inline_) { - indent(out) << " $xfer += " + indent(out) << "$xfer += " << "TProtocol::skipBinary($input, $ftype);" << endl; } else { - indent(out) << " $xfer += $input->skip($ftype);" << endl; + indent(out) << "$xfer += $input->skip($ftype);" << endl; } - indent(out) << " break;" << endl; + indent(out) << "break;" << endl; + indent_down(); scope_down(out); @@ -996,7 +1038,7 @@ void t_php_generator::generate_php_struct_reader(ofstream& out, t_struct* tstruc indent(out) << "return $xfer;" << endl; indent_down(); - out << indent() << "}" << endl 
<< endl; + out << indent() << "}" << endl; } /** @@ -1008,10 +1050,11 @@ void t_php_generator::generate_php_struct_writer(ofstream& out, t_struct* tstruc vector::const_iterator f_iter; if (binary_inline_) { - indent(out) << "public function write(&$output) {" << endl; + indent(out) << "public function write(&$output)" << endl; } else { - indent(out) << "public function write($output) {" << endl; + indent(out) << "public function write($output)" << endl; } + indent(out) << "{" << endl; indent_up(); if (needs_php_write_validator(tstruct, is_result)) { @@ -1086,7 +1129,7 @@ void t_php_generator::generate_php_struct_writer(ofstream& out, t_struct* tstruc out << indent() << "return $xfer;" << endl; indent_down(); - out << indent() << "}" << endl << endl; + out << indent() << "}" << endl; } void t_php_generator::generate_php_struct_read_validator(ofstream& out, t_struct* tstruct) { @@ -1124,7 +1167,7 @@ void t_php_generator::generate_php_struct_required_validator(ofstream& out, } indent_down(); - indent(out) << "}" << endl << endl; + indent(out) << "}" << endl; } void t_php_generator::generate_php_struct_json_serialize(ofstream& out, @@ -1173,7 +1216,7 @@ void t_php_generator::generate_php_struct_json_serialize(ofstream& out, indent(out) << "return $json;" << endl; indent_down(); - indent(out) << "}" << endl << endl; + indent(out) << "}" << endl; } int t_php_generator::get_php_num_required_fields(const vector& fields, bool write_mode) { @@ -1207,7 +1250,7 @@ bool t_php_generator::needs_php_read_validator(t_struct* tstruct, bool is_result * @param tservice The service definition */ void t_php_generator::generate_service(t_service* tservice) { - if(!psr4_) { + if(classmap_) { string f_service_name = package_dir_ + service_name_ + ".php"; f_service_.open(f_service_name.c_str()); generate_service_header(tservice, f_service_); @@ -1224,7 +1267,7 @@ void t_php_generator::generate_service(t_service* tservice) { generate_service_processor(tservice); } - if(!psr4_) { + 
if(classmap_) { // Close service file f_service_ << endl; f_service_.close(); @@ -1238,7 +1281,7 @@ void t_php_generator::generate_service(t_service* tservice) { */ void t_php_generator::generate_service_processor(t_service* tservice) { std::ofstream& f_service_processor = f_service_; - if (psr4_) { + if (!classmap_) { string f_service_processor_name = package_dir_ + service_name_ + "Processor.php"; f_service_processor.open(f_service_processor_name.c_str()); generate_service_header(tservice, f_service_processor); @@ -1257,23 +1300,30 @@ void t_php_generator::generate_service_processor(t_service* tservice) { } // Generate the header portion - f_service_processor << "class " << service_name_ << "Processor" << extends_processor << " {" << endl; + f_service_processor << "class " << service_name_ << "Processor" << extends_processor << endl + << "{" << endl; indent_up(); if (extends.empty()) { f_service_processor << indent() << "protected $handler_ = null;" << endl; } - f_service_processor << indent() << "public function __construct($handler) {" << endl; + f_service_processor << indent() << "public function __construct($handler)"<< endl + << indent() << "{" << endl; + + indent_up(); if (extends.empty()) { - f_service_processor << indent() << " $this->handler_ = $handler;" << endl; + f_service_processor << indent() << "$this->handler_ = $handler;" << endl; } else { - f_service_processor << indent() << " parent::__construct($handler);" << endl; + f_service_processor << indent() << "parent::__construct($handler);" << endl; } + + indent_down(); f_service_processor << indent() << "}" << endl << endl; // Generate the server implementation - indent(f_service_processor) << "public function process($input, $output) {" << endl; + f_service_processor << indent() << "public function process($input, $output)" << endl + << indent() << "{" << endl; indent_up(); f_service_processor << indent() << "$rseqid = 0;" << endl << indent() << "$fname = null;" << endl @@ -1291,11 +1341,12 @@ void 
t_php_generator::generate_service_processor(t_service* tservice) { } // HOT: check for method implementation - f_service_processor << indent() << "$methodname = 'process_'.$fname;" << endl << indent() - << "if (!method_exists($this, $methodname)) {" << endl; + f_service_processor << indent() << "$methodname = 'process_'.$fname;" << endl + << indent() << "if (!method_exists($this, $methodname)) {" << endl; + + indent_up(); if (binary_inline_) { - f_service_processor << indent() << " throw new \\Exception('Function '.$fname.' not implemented.');" - << endl; + f_service_processor << indent() << "throw new \\Exception('Function '.$fname.' not implemented.');" << endl; } else { f_service_processor << indent() << " $input->skip(" << "TType::STRUCT);" << endl << indent() << " $input->readMessageEnd();" << endl @@ -1308,9 +1359,12 @@ void t_php_generator::generate_service_processor(t_service* tservice) { << endl << indent() << " $output->getTransport()->flush();" << endl << indent() << " return;" << endl; } - f_service_processor << indent() << "}" << endl << indent() - << "$this->$methodname($rseqid, $input, $output);" << endl << indent() - << "return true;" << endl; + + indent_down(); + f_service_processor << indent() << "}" << endl + << indent() << "$this->$methodname($rseqid, $input, $output);" << endl + << indent() << "return true;" << endl; + indent_down(); f_service_processor << indent() << "}" << endl << endl; @@ -1322,7 +1376,7 @@ void t_php_generator::generate_service_processor(t_service* tservice) { indent_down(); f_service_processor << "}" << endl; - if (psr4_) { + if (!classmap_) { f_service_processor.close(); } } @@ -1334,8 +1388,8 @@ void t_php_generator::generate_service_processor(t_service* tservice) { */ void t_php_generator::generate_process_function(std::ofstream& out, t_service* tservice, t_function* tfunction) { // Open function - indent(out) << "protected function process_" << tfunction->get_name() - << "($seqid, $input, $output) {" << endl; + out << 
indent() << "protected function process_" << tfunction->get_name() << "($seqid, $input, $output)" << endl + << indent() << "{" << endl; indent_up(); string argsname = php_namespace(tservice->get_program()) + service_name_ + "_" @@ -1346,21 +1400,32 @@ void t_php_generator::generate_process_function(std::ofstream& out, t_service* t out << indent() << "$bin_accel = ($input instanceof " << "TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary_after_message_begin');" << endl; - out << indent() << "if ($bin_accel)" << endl; - scope_up(out); + out << indent() << "if ($bin_accel) {" << endl; + indent_up(); - out << indent() << "$args = thrift_protocol_read_binary_after_message_begin($input, '" << argsname - << "', $input->isStrictRead());" << endl; + out << indent() << "$args = thrift_protocol_read_binary_after_message_begin(" <isStrictRead()" <read($input);" << endl; + + indent_down(); + out << indent() << "}" << endl; - scope_down(out); - out << indent() << "else" << endl; - scope_up(out); - out << indent() << "$args = new " << argsname << "();" << endl << indent() - << "$args->read($input);" << endl; if (!binary_inline_) { out << indent() << "$input->readMessageEnd();" << endl; } - scope_down(out); t_struct* xs = tfunction->get_xceptions(); const std::vector& xceptions = xs->get_members(); @@ -1428,16 +1493,25 @@ void t_php_generator::generate_process_function(std::ofstream& out, t_service* t << "TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');" << endl; - out << indent() << "if ($bin_accel)" << endl; - scope_up(out); + out << indent() << "if ($bin_accel) {" << endl; + indent_up(); - out << indent() << "thrift_protocol_write_binary($output, '" << tfunction->get_name() - << "', " - << "TMessageType::REPLY, $result, $seqid, $output->isStrictWrite());" << endl; + out << indent() << "thrift_protocol_write_binary(" << endl; - scope_down(out); - out << indent() << "else" << endl; - scope_up(out); + indent_up(); + out << 
indent() << "$output,"<get_name()<< "'," <isStrictWrite()"<::iterator f_iter; std::ofstream& f_struct_definition = f_service_; - if (!psr4_) { + if (classmap_) { f_struct_definition << "// HELPER FUNCTIONS AND STRUCTURES" << endl << endl; } @@ -1480,14 +1554,14 @@ void t_php_generator::generate_service_helpers(t_service* tservice) { string name = ts->get_name(); ts->set_name(service_name_ + "_" + name); - if (psr4_) { + if (!classmap_) { string f_struct_definition_name = package_dir_ + service_name_ + "_" + name + ".php"; f_struct_definition.open(f_struct_definition_name.c_str()); generate_service_header(tservice, f_struct_definition); } generate_php_struct_definition(f_struct_definition, ts); - if (psr4_) { + if (!classmap_) { f_struct_definition.close(); } @@ -1517,13 +1591,13 @@ void t_php_generator::generate_php_function_helpers(t_service* tservice, t_funct } std::ofstream& f_struct_helper = f_service_; - if (psr4_) { + if (!classmap_) { string f_struct_helper_name = package_dir_ + result.get_name() + ".php"; f_struct_helper.open(f_struct_helper_name.c_str()); generate_service_header(tservice, f_struct_helper); } generate_php_struct_definition(f_struct_helper, &result, false, true); - if (psr4_) { + if (!classmap_) { f_struct_helper.close(); } } @@ -1536,7 +1610,7 @@ void t_php_generator::generate_php_function_helpers(t_service* tservice, t_funct */ void t_php_generator::generate_service_interface(t_service* tservice) { std::ofstream& f_service_interface = f_service_; - if (psr4_) { + if (!classmap_) { string f_service_interface_name = package_dir_ + service_name_ + "If.php"; f_service_interface.open(f_service_interface_name.c_str()); generate_service_header(tservice, f_service_interface); @@ -1551,8 +1625,9 @@ void t_php_generator::generate_service_interface(t_service* tservice) { + tservice->get_extends()->get_name() + "If"; } generate_php_doc(f_service_interface, tservice); - f_service_interface << "interface " << php_namespace_declaration(tservice) << "If" 
<< extends_if << " {" - << endl; + f_service_interface << "interface " << php_namespace_declaration(tservice) << "If" << extends_if << endl + << "{" << endl; + indent_up(); vector functions = tservice->get_functions(); vector::iterator f_iter; @@ -1561,11 +1636,10 @@ void t_php_generator::generate_service_interface(t_service* tservice) { indent(f_service_interface) << "public function " << function_signature(*f_iter) << ";" << endl; } indent_down(); - f_service_interface << "}" << endl << endl; + f_service_interface << "}" << endl; // Close service interface file - f_service_interface << endl; - if (psr4_) { + if (!classmap_) { f_service_interface.close(); } } @@ -1575,7 +1649,7 @@ void t_php_generator::generate_service_interface(t_service* tservice) { */ void t_php_generator::generate_service_rest(t_service* tservice) { std::ofstream& f_service_rest = f_service_; - if (psr4_) { + if (!classmap_) { string f_service_rest_name = package_dir_ + service_name_ + "Rest.php"; f_service_rest.open(f_service_rest_name.c_str()); generate_service_header(tservice, f_service_rest); @@ -1589,7 +1663,8 @@ void t_php_generator::generate_service_rest(t_service* tservice) { extends_if = " extends " + php_namespace(tservice->get_extends()->get_program()) + tservice->get_extends()->get_name() + "Rest"; } - f_service_rest << "class " << service_name_ << "Rest" << extends_if << " {" << endl; + f_service_rest << "class " << service_name_ << "Rest" << extends_if << endl + << "{" << endl; indent_up(); if (extends.empty()) { @@ -1643,7 +1718,7 @@ void t_php_generator::generate_service_rest(t_service* tservice) { // Close service rest file f_service_rest << endl; - if (psr4_) { + if (!classmap_) { f_service_rest.close(); } } @@ -1655,7 +1730,7 @@ void t_php_generator::generate_service_rest(t_service* tservice) { */ void t_php_generator::generate_service_client(t_service* tservice) { std::ofstream& f_service_client = f_service_; - if (psr4_) { + if (!classmap_) { string f_service_client_name = 
package_dir_ + service_name_ + "Client.php"; f_service_client.open(f_service_client_name.c_str()); generate_service_header(tservice, f_service_client); @@ -1670,8 +1745,8 @@ void t_php_generator::generate_service_client(t_service* tservice) { } f_service_client << "class " << php_namespace_declaration(tservice) << "Client" << extends_client - << " implements " << php_namespace(tservice->get_program()) << service_name_ << "If {" - << endl; + << " implements " << php_namespace(tservice->get_program()) << service_name_ << "If" << endl + <<"{"<< endl; indent_up(); // Private members @@ -1682,13 +1757,18 @@ void t_php_generator::generate_service_client(t_service* tservice) { } // Constructor function - f_service_client << indent() << "public function __construct($input, $output=null) {" << endl; + f_service_client << indent() << "public function __construct($input, $output = null)" << endl + << indent() << "{" << endl; + + indent_up(); if (!extends.empty()) { - f_service_client << indent() << " parent::__construct($input, $output);" << endl; + f_service_client << indent() << "parent::__construct($input, $output);" << endl; } else { - f_service_client << indent() << " $this->input_ = $input;" << endl << indent() - << " $this->output_ = $output ? $output : $input;" << endl; + f_service_client << indent() << "$this->input_ = $input;" << endl + << indent() << "$this->output_ = $output ? 
$output : $input;" << endl; } + + indent_down(); f_service_client << indent() << "}" << endl << endl; // Generate client method implementations @@ -1700,6 +1780,8 @@ void t_php_generator::generate_service_client(t_service* tservice) { vector::const_iterator fld_iter; string funname = (*f_iter)->get_name(); + f_service_client << endl; + // Open function indent(f_service_client) << "public function " << function_signature(*f_iter) << endl; scope_up(f_service_client); @@ -1743,18 +1825,27 @@ void t_php_generator::generate_service_client(t_service* tservice) { << "TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');" << endl; - f_service_client << indent() << "if ($bin_accel)" << endl; - scope_up(f_service_client); + f_service_client << indent() << "if ($bin_accel) {" << endl; + indent_up(); string messageType = (*f_iter)->is_oneway() ? "TMessageType::ONEWAY" : "TMessageType::CALL"; - f_service_client << indent() << "thrift_protocol_write_binary($this->output_, '" - << (*f_iter)->get_name() << "', " << messageType - << ", $args, $this->seqid_, $this->output_->isStrictWrite());" << endl; + f_service_client << indent() << "thrift_protocol_write_binary(" << endl; - scope_down(f_service_client); - f_service_client << indent() << "else" << endl; - scope_up(f_service_client); + indent_up(); + f_service_client << indent() << "$this->output_," << endl + << indent() << "'" << (*f_iter)->get_name() << "'," << endl + << indent() << messageType << "," << endl + << indent() << "$args," << endl + << indent() << "$this->seqid_," << endl + << indent() << "$this->output_->isStrictWrite()" << endl; + + indent_down(); + f_service_client << indent() << ");" << endl; + + indent_down(); + f_service_client << indent() << "} else {" << endl; + indent_up(); // Serialize the request header if (binary_inline_) { @@ -1799,14 +1890,26 @@ void t_php_generator::generate_service_client(t_service* tservice) { << "TBinaryProtocolAccelerated)" << " && 
function_exists('thrift_protocol_read_binary');" << endl; - f_service_client << indent() - << "if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '" - << resultname << "', $this->input_->isStrictRead());" << endl; - f_service_client << indent() << "else" << endl; - scope_up(f_service_client); + f_service_client << indent() << "if ($bin_accel) {" << endl; - f_service_client << indent() << "$rseqid = 0;" << endl << indent() << "$fname = null;" << endl - << indent() << "$mtype = 0;" << endl << endl; + indent_up(); + f_service_client << indent() << "$result = thrift_protocol_read_binary(" << endl; + + indent_up(); + f_service_client << indent() << "$this->input_," << endl + << indent() << "'" << resultname << "'," << endl + << indent() << "$this->input_->isStrictRead()" << endl; + + indent_down(); + f_service_client << indent() << ");" << endl; + + indent_down(); + f_service_client << indent() << "} else {" << endl; + + indent_up(); + f_service_client << indent() << "$rseqid = 0;" << endl + << indent() << "$fname = null;" << endl + << indent() << "$mtype = 0;" << endl << endl; if (binary_inline_) { t_field ffname(g_type_string, "fname"); @@ -1820,16 +1923,20 @@ void t_php_generator::generate_service_client(t_service* tservice) { generate_deserialize_field(f_service_client, &ffname, "", true); generate_deserialize_field(f_service_client, &fseqid, "", true); } else { - f_service_client << indent() << "$this->input_->readMessageBegin($fname, $mtype, $rseqid);" - << endl << indent() << "if ($mtype == " - << "TMessageType::EXCEPTION) {" << endl << indent() << " $x = new " - << "TApplicationException();" << endl << indent() << " $x->read($this->input_);" - << endl << indent() << " $this->input_->readMessageEnd();" << endl << indent() - << " throw $x;" << endl << indent() << "}" << endl; + f_service_client << indent() << "$this->input_->readMessageBegin($fname, $mtype, $rseqid);" << endl + << indent() << "if ($mtype == TMessageType::EXCEPTION) {" << endl; + 
+ indent_up(); + f_service_client << indent() << "$x = new TApplicationException();" << endl + << indent() << "$x->read($this->input_);" << endl + << indent() << "$this->input_->readMessageEnd();" << endl + << indent() << "throw $x;" << endl; + indent_down(); + f_service_client << indent() << "}" << endl; } - f_service_client << indent() << "$result = new " << resultname << "();" << endl << indent() - << "$result->read($this->input_);" << endl; + f_service_client << indent() << "$result = new " << resultname << "();" << endl + << indent() << "$result->read($this->input_);" << endl; if (!binary_inline_) { f_service_client << indent() << "$this->input_->readMessageEnd();" << endl; @@ -1839,17 +1946,26 @@ void t_php_generator::generate_service_client(t_service* tservice) { // Careful, only return result if not a void function if (!(*f_iter)->get_returntype()->is_void()) { - f_service_client << indent() << "if ($result->success !== null) {" << endl << indent() - << " return $result->success;" << endl << indent() << "}" << endl; + f_service_client << indent() << "if ($result->success !== null) {" << endl; + + indent_up(); + f_service_client << indent() << "return $result->success;" << endl; + + indent_down(); + f_service_client << indent() << "}" << endl; } t_struct* xs = (*f_iter)->get_xceptions(); const std::vector& xceptions = xs->get_members(); vector::const_iterator x_iter; for (x_iter = xceptions.begin(); x_iter != xceptions.end(); ++x_iter) { - f_service_client << indent() << "if ($result->" << (*x_iter)->get_name() << " !== null) {" << endl - << indent() << " throw $result->" << (*x_iter)->get_name() << ";" << endl - << indent() << "}" << endl; + f_service_client << indent() << "if ($result->" << (*x_iter)->get_name() << " !== null) {" << endl; + + indent_up(); + f_service_client << indent() << "throw $result->" << (*x_iter)->get_name() << ";" << endl; + + indent_down(); + f_service_client << indent() << "}" << endl; } // Careful, only return _result if not a 
void function @@ -1862,16 +1978,14 @@ void t_php_generator::generate_service_client(t_service* tservice) { // Close function scope_down(f_service_client); - f_service_client << endl; } } indent_down(); - f_service_client << "}" << endl << endl; + f_service_client << "}" << endl; // Close service client file - f_service_client << endl; - if (psr4_) { + if (!classmap_) { f_service_client.close(); } } @@ -2065,9 +2179,9 @@ void t_php_generator::generate_deserialize_container(ofstream& out, t_type* ttyp // For loop iterates over elements string i = tmp("_i"); - indent(out) << "for ($" << i << " = 0; $" << i << " < $" << size << "; ++$" << i << ")" << endl; + indent(out) << "for ($" << i << " = 0; $" << i << " < $" << size << "; ++$" << i << ") {" << endl; - scope_up(out); + indent_up(); if (ttype->is_map()) { generate_deserialize_map_element(out, (t_map*)ttype, prefix); @@ -2258,8 +2372,6 @@ void t_php_generator::generate_serialize_struct(ofstream& out, t_struct* tstruct * Writes out a container */ void t_php_generator::generate_serialize_container(ofstream& out, t_type* ttype, string prefix) { - scope_up(out); - if (ttype->is_map()) { if (binary_inline_) { out << indent() << "$output .= pack('c', " << type_to_enum(((t_map*)ttype)->get_key_type()) @@ -2295,21 +2407,19 @@ void t_php_generator::generate_serialize_container(ofstream& out, t_type* ttype, } } - scope_up(out); - if (ttype->is_map()) { string kiter = tmp("kiter"); string viter = tmp("viter"); indent(out) << "foreach ($" << prefix << " as " - << "$" << kiter << " => $" << viter << ")" << endl; - scope_up(out); + << "$" << kiter << " => $" << viter << ") {" << endl; + indent_up(); generate_serialize_map_element(out, (t_map*)ttype, kiter, viter); scope_down(out); } else if (ttype->is_set()) { string iter = tmp("iter"); string iter_val = tmp("iter"); - indent(out) << "foreach ($" << prefix << " as $" << iter << " => $" << iter_val << ")" << endl; - scope_up(out); + indent(out) << "foreach ($" << prefix << " as $" 
<< iter << " => $" << iter_val << ") {" << endl; + indent_up(); t_type* elem_type = ((t_set*)ttype)->get_elem_type(); if(php_is_scalar(elem_type)) { @@ -2320,14 +2430,12 @@ void t_php_generator::generate_serialize_container(ofstream& out, t_type* ttype, scope_down(out); } else if (ttype->is_list()) { string iter = tmp("iter"); - indent(out) << "foreach ($" << prefix << " as $" << iter << ")" << endl; - scope_up(out); + indent(out) << "foreach ($" << prefix << " as $" << iter << ") {" << endl; + indent_up(); generate_serialize_list_element(out, (t_list*)ttype, iter); scope_down(out); } - scope_down(out); - if (!binary_inline_) { if (ttype->is_map()) { indent(out) << "$output->writeMapEnd();" << endl; @@ -2337,8 +2445,6 @@ void t_php_generator::generate_serialize_container(ofstream& out, t_type* ttype, indent(out) << "$output->writeListEnd();" << endl; } } - - scope_down(out); } /** @@ -2672,7 +2778,7 @@ THRIFT_REGISTER_GENERATOR( " inlined: Generate PHP inlined files\n" " server: Generate PHP server stubs\n" " oop: Generate PHP with object oriented subclasses\n" - " psr4: Generate each PHP class in separate file (allows PSR4 autoloading)\n" + " classmap: Generate old-style PHP files (use classmap autoloading)\n" " rest: Generate PHP REST processors\n" " nsglobal=NAME: Set global namespace\n" " validate: Generate PHP validator methods\n" diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_py_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_py_generator.cc index 9078da842..caa04a869 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_py_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_py_generator.cc @@ -19,7 +19,9 @@ #include #include +#include #include +#include #include #include @@ -541,9 +543,9 @@ string t_py_generator::render_const_value(t_type* type, t_const_value* value) { break; case t_base_type::TYPE_DOUBLE: if (value->get_type() 
== t_const_value::CV_INTEGER) { - out << value->get_integer(); + out << "float(" << value->get_integer() << ")"; } else { - out << value->get_double(); + out << emit_double_as_string(value->get_double()); } break; default: @@ -556,8 +558,8 @@ string t_py_generator::render_const_value(t_type* type, t_const_value* value) { indent_up(); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -581,8 +583,8 @@ string t_py_generator::render_const_value(t_type* type, t_const_value* value) { } out << "{" << endl; indent_up(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { indent(out) << render_const_value(ktype, v_iter->first) << ": " << render_const_value(vtype, v_iter->second) << "," << endl; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rb_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rb_generator.cc index 924f6f6eb..3f2b78e6a 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rb_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rb_generator.cc @@ -455,8 +455,8 @@ t_rb_ofstream& t_rb_generator::render_const_value(t_rb_ofstream& out, out.indent_up(); const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; for (f_iter = 
fields.begin(); f_iter != fields.end(); ++f_iter) { @@ -478,8 +478,8 @@ t_rb_ofstream& t_rb_generator::render_const_value(t_rb_ofstream& out, t_type* vtype = ((t_map*)type)->get_val_type(); out << "{" << endl; out.indent_up(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { out.indent(); render_const_value(out, ktype, v_iter->first) << " => "; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rs_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rs_generator.cc index 28c57f836..6001d8f66 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rs_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_rs_generator.cc @@ -781,8 +781,8 @@ void t_rs_generator::render_const_map(t_type* ttype, t_const_value* tvalue) { << to_rust_type(key_type) << ", " << to_rust_type(val_type) << "> = BTreeMap::new();" << endl; - const map& elems = tvalue->get_map(); - map::const_iterator elem_iter; + const map& elems = tvalue->get_map(); + map::const_iterator elem_iter; for (elem_iter = elems.begin(); elem_iter != elems.end(); ++elem_iter) { t_const_value* key_value = elem_iter->first; t_const_value* val_value = elem_iter->second; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_st_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_st_generator.cc index 69ed776a7..c45666ad9 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_st_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_st_generator.cc @@ -402,8 +402,8 @@ string t_st_generator::render_const_value(t_type* type, t_const_value* value) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = 
value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { t_type* field_type = NULL; @@ -428,8 +428,8 @@ string t_st_generator::render_const_value(t_type* type, t_const_value* value) { out << "(Dictionary new" << endl; indent_up(); indent_up(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { out << indent() << indent(); out << "at: " << render_const_value(ktype, v_iter->first); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_swift_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_swift_generator.cc index 87dd2f020..7b4d22451 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_swift_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_swift_generator.cc @@ -1900,8 +1900,8 @@ void t_swift_generator::render_const_value(ostream& out, const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (f_iter = fields.begin(); f_iter != fields.end();) { t_field* tfield = *f_iter; @@ -1934,8 +1934,8 @@ void t_swift_generator::render_const_value(ostream& out, t_type* ktype = ((t_map*)type)->get_key_type(); t_type* vtype = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end();) { @@ -1956,8 +1956,8 @@ void t_swift_generator::render_const_value(ostream& out, t_type* etype = ((t_list*)type)->get_elem_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + 
const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end();) { @@ -1976,8 +1976,8 @@ void t_swift_generator::render_const_value(ostream& out, t_type* etype = ((t_set*)type)->get_elem_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end();) { diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_xml_generator.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_xml_generator.cc index e7e01fd8c..a832afd58 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_xml_generator.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/t_xml_generator.cc @@ -483,8 +483,8 @@ void t_xml_generator::write_const_value(t_const_value* value) { case t_const_value::CV_MAP: { write_element_start("map"); - std::map map = value->get_map(); - std::map::iterator mit; + std::map map = value->get_map(); + std::map::iterator mit; for (mit = map.begin(); mit != map.end(); ++mit) { write_element_start("entry"); write_element_start("key"); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/thrift-t_php_generator.o-a60a38e9 b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/generate/thrift-t_php_generator.o-a60a38e9 new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/main.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/main.cc index 84218405b..0c21e0281 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/main.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/main.cc @@ -805,8 +805,8 @@ void validate_const_rec(std::string name, t_type* type, t_const_value* value) { const vector& fields = ((t_struct*)type)->get_members(); vector::const_iterator f_iter; - const map& val = 
value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { if (v_iter->first->get_type() != t_const_value::CV_STRING) { throw "type error: " + name + " struct key must be string"; @@ -826,8 +826,8 @@ void validate_const_rec(std::string name, t_type* type, t_const_value* value) { } else if (type->is_map()) { t_type* k_type = ((t_map*)type)->get_key_type(); t_type* v_type = ((t_map*)type)->get_val_type(); - const map& val = value->get_map(); - map::const_iterator v_iter; + const map& val = value->get_map(); + map::const_iterator v_iter; for (v_iter = val.begin(); v_iter != val.end(); ++v_iter) { validate_const_rec(name + "", k_type, v_iter->first); validate_const_rec(name + "", v_type, v_iter->second); diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_const_value.h b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_const_value.h index fc2f648e8..6a114cf16 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_const_value.h +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_const_value.h @@ -38,6 +38,17 @@ void convert(From*, To&); */ class t_const_value { public: + /** + * Comparator to sort fields in ascending order by key. + * Make this a functor instead of a function to help GCC inline it. 
+ */ + struct value_compare { + public: + bool operator()(t_const_value const* const& left, t_const_value const* const& right) const { + return *left < *right; + } + }; + enum t_const_value_type { CV_INTEGER, CV_DOUBLE, CV_STRING, CV_MAP, CV_LIST, CV_IDENTIFIER, CV_UNKNOWN }; t_const_value() : intVal_(0), doubleVal_(0.0f), enum_((t_enum*)0), valType_(CV_UNKNOWN) {} @@ -90,7 +101,7 @@ public: void add_map(t_const_value* key, t_const_value* val) { mapVal_[key] = val; } - const std::map& get_map() const { return mapVal_; } + const std::map& get_map() const { return mapVal_; } void set_list() { valType_ = CV_LIST; } @@ -136,8 +147,55 @@ public: t_const_value_type get_type() const { if (valType_ == CV_UNKNOWN) { throw std::string("unknown t_const_value"); } return valType_; } + /** + * Comparator to sort map fields in ascending order by key and then value. + * This is used for map comparison in lexicographic order. + */ + struct map_entry_compare { + private: + typedef std::pair ConstPair; + public: + bool operator()(ConstPair left, ConstPair right) const { + if (*(left.first) < *(right.first)) { + return true; + } else { + if (*(right.first) < *(left.first)) { + return false; + } else { + return *(left.second) < *(right.second); + } + } + } + }; + + bool operator < (const t_const_value& that) const { + ::t_const_value::t_const_value_type t1 = get_type(); + ::t_const_value::t_const_value_type t2 = that.get_type(); + if (t1 != t2) + return t1 < t2; + switch (t1) { + case ::t_const_value::CV_INTEGER: + return intVal_ < that.intVal_; + case ::t_const_value::CV_DOUBLE: + return doubleVal_ < that.doubleVal_; + case ::t_const_value::CV_STRING: + return stringVal_ < that.stringVal_; + case ::t_const_value::CV_IDENTIFIER: + return identifierVal_ < that.identifierVal_; + case ::t_const_value::CV_MAP: + return std::lexicographical_compare( + mapVal_.begin(), mapVal_.end(), that.mapVal_.begin(), that.mapVal_.end(), map_entry_compare()); + case ::t_const_value::CV_LIST: + return 
std::lexicographical_compare( + listVal_.begin(), listVal_.end(), that.listVal_.begin(), that.listVal_.end(), value_compare()); + case ::t_const_value::CV_UNKNOWN: + default: + throw "unknown value type"; + } + } + private: - std::map mapVal_; + std::map mapVal_; std::vector listVal_; std::string stringVal_; int64_t intVal_; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_scope.h b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_scope.h index 02aa550bf..6f160a5cc 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_scope.h +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/parse/t_scope.h @@ -76,8 +76,8 @@ public: void resolve_const_value(t_const_value* const_val, t_type* ttype) { if (ttype->is_map()) { - const std::map& map = const_val->get_map(); - std::map::const_iterator v_iter; + const std::map& map = const_val->get_map(); + std::map::const_iterator v_iter; for (v_iter = map.begin(); v_iter != map.end(); ++v_iter) { resolve_const_value(v_iter->first, ((t_map*)ttype)->get_key_type()); resolve_const_value(v_iter->second, ((t_map*)ttype)->get_val_type()); @@ -96,8 +96,8 @@ public: } } else if (ttype->is_struct()) { t_struct* tstruct = (t_struct*)ttype; - const std::map& map = const_val->get_map(); - std::map::const_iterator v_iter; + const std::map& map = const_val->get_map(); + std::map::const_iterator v_iter; for (v_iter = map.begin(); v_iter != map.end(); ++v_iter) { t_field* field = tstruct->get_field_by_name(v_iter->first->get_string()); if (field == NULL) { @@ -137,8 +137,8 @@ public: throw "Constants cannot be of type VOID"; } } else if (const_type->is_map()) { - const std::map& map = constant->get_value()->get_map(); - std::map::const_iterator v_iter; + const std::map& map = constant->get_value()->get_map(); + std::map::const_iterator v_iter; const_val->set_map(); for (v_iter = map.begin(); v_iter != map.end(); ++v_iter) { diff --git 
a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.cc index 0bac1352b..ca5d28771 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.cc @@ -124,6 +124,11 @@ struct TypeCache { std::map const* source; + void clear() { + source = nullptr ; + cache.clear() ; + } + protected: std::map cache; @@ -141,6 +146,12 @@ TypeCache< ::t_type, t_type> g_type_cache; TypeCache< ::t_const, t_const> g_const_cache; TypeCache< ::t_service, t_service> g_service_cache; +void clear_global_cache() { + g_type_cache.clear(); + g_const_cache.clear(); + g_service_cache.clear(); +} + void set_global_cache(const TypeRegistry& from) { g_type_cache.source = &from.types; g_const_cache.source = &from.constants; @@ -258,11 +269,11 @@ THRIFT_CONVERSION(t_const_value, ) { T_CONST_VALUE_CASE(string); else T_CONST_VALUE_CASE(integer); else T_CONST_VALUE_CASE(double); - else { - T_CONST_VALUE_CASE(identifier); - if (from.__isset.enum_val) - to->set_enum(resolve_type< ::t_enum>(from.enum_val)); + else if (from.__isset.const_identifier_val) { + to->set_identifier(from.const_identifier_val.identifier_val) ; + to->set_enum(resolve_type< ::t_enum>(from.const_identifier_val.enum_val)) ; } + #undef T_CONST_VALUE_CASE } THRIFT_CONVERSION(t_field, resolve_type< ::t_type>(from.type), from.name, from.key) { @@ -458,9 +469,7 @@ int GeneratorPlugin::exec(int, char* []) { return ::t_const_value::CV_INTEGER; if (v.__isset.double_val) return ::t_const_value::CV_DOUBLE; - if (v.__isset.identifier_val) - return ::t_const_value::CV_IDENTIFIER; - if (v.__isset.enum_val) + if (v.__isset.const_identifier_val) return ::t_const_value::CV_IDENTIFIER; throw ThriftPluginError("Unknown const value type"); } diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.thrift 
b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.thrift index 1e51310d8..6d98f9955 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.thrift +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin.thrift @@ -105,15 +105,20 @@ enum Requiredness { T_OPT_IN_REQ_OUT = 2 } +struct t_const_identifier_value { + 1: required string identifier_val + 2: required t_type_id enum_val +} + union t_const_value { 1: optional map map_val 2: optional list list_val 3: optional string string_val 4: optional i64 integer_val 5: optional double double_val - 6: optional string identifier_val - 7: optional t_type_id enum_val + 8: optional t_const_identifier_value const_identifier_val } + struct t_const { 1: required string name 2: required t_type_id type diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin_output.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin_output.cc index 75725a1c0..81b9a2aa6 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin_output.cc +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/plugin_output.cc @@ -99,29 +99,62 @@ using namespace apache::thrift; #define THRIFT_ASSIGN_METADATA() convert(reinterpret_cast(from), to.metadata) +// a generator of sequential unique identifiers for addresses -- so +// that the TypeCache below can use those IDs instead of +// addresses. This allows GeneratorInput's various +// t_{program,type,etc}_id types to be dense consecutively-numbered +// integers, instead of large random-seeming integers. +// +// Furthermore, this allows GeneratorInput to be deterministic (no +// addresses, so no pseudo-randomness) and that means reproducibility +// of output. 
+const int64_t ONE_MILLION = 1000 * 1000; +class id_generator { +public: + id_generator() : addr2id_(), next_id_(ONE_MILLION) {} + + void clear() { + addr2id_.clear() ; + next_id_ = ONE_MILLION ; + } + + int64_t gensym(const int64_t addr) { + if (!addr) return 0L ; + std::map::iterator it = addr2id_.find(addr); + if (it != addr2id_.end()) return it->second ; + int64_t id = next_id_++ ; + addr2id_.insert(std::make_pair(addr, id)) ; + return id ; + } + + std::map addr2id_ ; + int64_t next_id_ ; +} ; + // To avoid multiple instances of same type, t_type, t_const and t_service are stored in one place // and referenced by ID. template struct TypeCache { typedef typename plugin::ToType::type to_type; + id_generator idgen ; std::map cache; template int64_t store(T2* t) { - intptr_t id = reinterpret_cast(t); - if (id) { - typename std::map::iterator it = cache.find(id); - if (it == cache.end()) { - // HACK: fake resolve for recursive type - cache.insert(std::make_pair(id, to_type())); - // overwrite with true value - cache[id] = convert(t); - } - } - return static_cast(id); + intptr_t addr = reinterpret_cast(t); + if (!addr) return 0L ; + + int64_t id = idgen.gensym(addr) ; + if (cache.end() != cache.find(id)) return id ; + + // HACK: fake resolve for recursive type + cache.insert(std::make_pair(id, to_type())); + // overwrite with true value + cache[id] = convert(t); + return id ; } - void clear() { cache.clear(); } + void clear() { cache.clear() ; idgen.clear(); } }; template @@ -137,6 +170,8 @@ T_STORE(type) T_STORE(const) T_STORE(service) #undef T_STORE +// this id_generator is for gensymm-ing t_program_id +id_generator program_cache ; #define THRIFT_ASSIGN_ID_N(t, from_name, to_name) \ do { \ @@ -157,7 +192,7 @@ T_STORE(service) } while (0) THRIFT_CONVERSION_N(::t_type, plugin::TypeMetadata) { - to.program_id = reinterpret_cast(from->get_program()); + to.program_id = program_cache.gensym(reinterpret_cast(from->get_program())); THRIFT_ASSIGN_N(annotations_, 
annotations, ); if (from->has_doc()) { to.__set_doc(from->get_doc()); @@ -195,8 +230,13 @@ THRIFT_CONVERSION(t_const_value) { THRIFT_ASSIGN_N(get_string(), string_val, ); break; case t_const_value::CV_IDENTIFIER: - THRIFT_ASSIGN_ID_N(t_type, enum_, enum_val); - THRIFT_ASSIGN_N(get_identifier(), identifier_val, ); + if (from) { + apache::thrift::plugin::t_const_identifier_value cidval ; + if (from->enum_) + cidval.__set_enum_val(store_type(from->enum_)); + cidval.__set_identifier_val(from->get_identifier()); + to.__set_const_identifier_val(cidval) ; + } break; case t_const_value::CV_MAP: to.__isset.map_val = true; @@ -341,6 +381,7 @@ void clear_global_cache() { type_cache.clear(); const_cache.clear(); service_cache.clear(); + program_cache.clear() ; } THRIFT_CONVERSION(t_program) { @@ -360,7 +401,7 @@ THRIFT_CONVERSION(t_program) { THRIFT_ASSIGN_LIST_ID(t_const, const); THRIFT_ASSIGN_LIST_ID(t_service, service); THRIFT_ASSIGN_LIST_N(t_program, get_includes(), includes); - to.program_id = reinterpret_cast(from); + to.program_id = program_cache.gensym(reinterpret_cast(from)); } PluginDelegateResult delegateToPlugin(t_program* program, const std::string& options) { @@ -410,3 +451,4 @@ PluginDelegateResult delegateToPlugin(t_program* program, const std::string& opt return PLUGIN_NOT_FOUND; } } + diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/type_util.h b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/type_util.h index 508b74181..996b5c666 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/type_util.h +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/plugin/type_util.h @@ -38,6 +38,7 @@ typename ToType::type* convert(const From& from); class TypeRegistry; void set_global_cache(const TypeRegistry&); +void clear_global_cache(); } } } diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/thrifty.yy b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/thrifty.yy index 
e4cae0c29..df34adf04 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/thrifty.yy +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/src/thrift/thrifty.yy @@ -35,6 +35,7 @@ #define __STDC_FORMAT_MACROS #endif #include +#include #ifndef _MSC_VER #include #else diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/test/Makefile.am b/vendor/git.apache.org/thrift.git/compiler/cpp/test/Makefile.am index 5a232029a..b7fc91d35 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/test/Makefile.am +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/test/Makefile.am @@ -32,6 +32,8 @@ check_PROGRAMS = plugintest noinst_PROGRAMS = thrift-gen-mycpp +all-local: thrift-gen-bincat + AM_CPPFLAGS += -I$(top_srcdir)/lib/cpp/src -I$(top_builddir)/lib/cpp/src plugintest_SOURCES = plugin/conversion_test.cc @@ -43,9 +45,16 @@ thrift_gen_mycpp_CPPFLAGS = $(AM_CPPFLAGS) -I$(top_srcdir)/compiler/cpp -I$(top_ thrift_gen_mycpp_LDADD = $(top_builddir)/compiler/cpp/libthriftc.la cpp_plugin_test.sh: thrift-gen-mycpp -TESTS = $(check_PROGRAMS) cpp_plugin_test.sh + +thrift-gen-bincat: + cp bincat.sh $@ + chmod 755 $@ + +plugin_stability_test.sh: thrift-gen-bincat + +TESTS = $(check_PROGRAMS) cpp_plugin_test.sh plugin_stability_test.sh clean-local: - $(RM) -rf gen-cpp gen-mycpp + $(RM) -rf gen-cpp gen-mycpp gen-bincat thrift-gen-bincat endif diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/test/bincat.sh b/vendor/git.apache.org/thrift.git/compiler/cpp/test/bincat.sh new file mode 100755 index 000000000..c7f90785e --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/test/bincat.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +exec /bin/cat diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin/conversion_test.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin/conversion_test.cc index 5159ba484..3c8d812cf 100644 --- a/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin/conversion_test.cc +++ 
b/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin/conversion_test.cc @@ -234,6 +234,8 @@ void migrate_global_cache() { template T* round_trip(T* t) { typename plugin::ToType::type p; + plugin::clear_global_cache(); + plugin_output::clear_global_cache(); plugin_output::convert(t, p); migrate_global_cache(); return plugin::convert(p); @@ -275,12 +277,12 @@ void test_const_value(t_const_value* sut) { BOOST_CHECK_EQUAL(sut->get_map().size(), sut2->get_map().size()); { std::map sut_values; - for (std::map::const_iterator it = sut->get_map().begin(); + for (std::map::const_iterator it = sut->get_map().begin(); it != sut->get_map().end(); it++) { sut_values[it->first->get_type()] = it->second->get_type(); } std::map sut2_values; - for (std::map::const_iterator it = sut2->get_map().begin(); + for (std::map::const_iterator it = sut2->get_map().begin(); it != sut2->get_map().end(); it++) { sut2_values[it->first->get_type()] = it->second->get_type(); } diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin_stability_test.sh b/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin_stability_test.sh new file mode 100755 index 000000000..eb7c93d66 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/test/plugin_stability_test.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# this file is intended to be invoked by make. +# +# This file runs the compiler twice, using a plugin that just invokes +# /bin/cat, and compares the output. If GeneratorInput is +# nondeterminsitic, you'd expect the output to differ from run-to-run. +# So this tests that in fact, the output is stable from run-to-run. +set -e +mkdir -p gen-bincat +PATH=.:"$PATH" ../thrift -r -gen bincat ../../../test/Include.thrift > gen-bincat/1.ser +PATH=.:"$PATH" ../thrift -r -gen bincat ../../../test/Include.thrift > gen-bincat/2.ser +diff --binary gen-bincat/1.ser gen-bincat/2.ser diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/CMakeLists.txt b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/CMakeLists.txt new file mode 100644 index 000000000..e2b100c7a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/CMakeLists.txt @@ -0,0 +1,153 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +cmake_minimum_required(VERSION 2.8.12) + +project(thrift_compiler_tests) + +set(THRIFT_COMPILER_SOURCE_DIR + ${CMAKE_CURRENT_SOURCE_DIR}/.. 
+) + +# don't generate ZERO_CHECK +set(CMAKE_SUPPRESS_REGENERATION true) + +configure_file(${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/version.h.in ${CMAKE_CURRENT_BINARY_DIR}/thrift/version.h) +if(MSVC) + # The winflexbison generator outputs some macros that conflict with the Visual Studio 2010 copy of stdint.h + # This might be fixed in later versions of Visual Studio, but an easy solution is to include stdint.h first + if(HAVE_STDINT_H) + add_definitions(-D__STDC_LIMIT_MACROS) + add_definitions(/FI"stdint.h") + endif(HAVE_STDINT_H) +endif() + +find_package(FLEX REQUIRED) +find_package(BISON REQUIRED) + +# create directory for thrifty and thriftl +file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/thrift/) + +# Create flex and bison files and build the lib parse static library +BISON_TARGET(thrifty ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/thrifty.yy ${CMAKE_CURRENT_BINARY_DIR}/thrift/thrifty.cc) +FLEX_TARGET(thriftl ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/thriftl.ll ${CMAKE_CURRENT_BINARY_DIR}/thrift/thriftl.cc) +ADD_FLEX_BISON_DEPENDENCY(thriftl thrifty) + +set(parse_SOURCES + ${CMAKE_CURRENT_BINARY_DIR}/thrift/thrifty.cc + ${CMAKE_CURRENT_BINARY_DIR}/thrift/thriftl.cc + ${CMAKE_CURRENT_BINARY_DIR}/thrift/thrifty.hh +) + +add_library(parse STATIC ${parse_SOURCES}) + +# Thrift compiler tests +set(thrift_compiler_tests +) + +# you can add some files manually there +set(thrift_compiler_tests_manual_SOURCES + # tests file to avoid main in every test file + ${CMAKE_CURRENT_SOURCE_DIR}/tests_main.cc +) + +# set variable for tests sources - will be filled later +set(thrift_compiler_tests_SOURCES +) + +set(thrift_compiler_SOURCES + ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/logging.cc # we use logging instead of main to avoid breaking compillation (2 main v) + ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/audit/t_audit.cpp + ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/common.cc + ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/generate/t_generator.cc + 
${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/parse/t_typedef.cc + ${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/parse/parse.cc + ${CMAKE_CURRENT_BINARY_DIR}/thrift/version.h +) + +# This macro adds an option THRIFT_COMPILER_${NAME} +# that allows enabling or disabling certain languages +macro(THRIFT_ADD_COMPILER name description initial) + string(TOUPPER "THRIFT_COMPILER_${name}" enabler) + set(src "${THRIFT_COMPILER_SOURCE_DIR}/src/thrift/generate/t_${name}_generator.cc") + option(${enabler} ${description} ${initial}) + if(${enabler}) + list(APPEND thrift_compiler_SOURCES ${src}) + file(GLOB thrift_compiler_tests_SOURCES + "${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.c*" + "${CMAKE_CURRENT_SOURCE_DIR}/${name}/*.thrift" + ) + endif() +endmacro() + +# The following compiler with unit tests can be enabled or disabled +THRIFT_ADD_COMPILER(c_glib "Enable compiler for C with Glib" OFF) +THRIFT_ADD_COMPILER(cpp "Enable compiler for C++" OFF) +THRIFT_ADD_COMPILER(java "Enable compiler for Java" OFF) +THRIFT_ADD_COMPILER(as3 "Enable compiler for ActionScript 3" OFF) +THRIFT_ADD_COMPILER(dart "Enable compiler for Dart" OFF) +THRIFT_ADD_COMPILER(haxe "Enable compiler for Haxe" OFF) +THRIFT_ADD_COMPILER(csharp "Enable compiler for C#" OFF) +THRIFT_ADD_COMPILER(netcore "Enable compiler for .NET Core" ON) +THRIFT_ADD_COMPILER(py "Enable compiler for Python 2.0" OFF) +THRIFT_ADD_COMPILER(rb "Enable compiler for Ruby" OFF) +THRIFT_ADD_COMPILER(perl "Enable compiler for Perl" OFF) +THRIFT_ADD_COMPILER(php "Enable compiler for PHP" OFF) +THRIFT_ADD_COMPILER(erl "Enable compiler for Erlang" OFF) +THRIFT_ADD_COMPILER(cocoa "Enable compiler for Cocoa Objective-C" OFF) +THRIFT_ADD_COMPILER(swift "Enable compiler for Cocoa Swift" OFF) +THRIFT_ADD_COMPILER(st "Enable compiler for Smalltalk" OFF) +THRIFT_ADD_COMPILER(ocaml "Enable compiler for OCaml" OFF) +THRIFT_ADD_COMPILER(hs "Enable compiler for Haskell" OFF) +THRIFT_ADD_COMPILER(xsd "Enable compiler for XSD" OFF) +THRIFT_ADD_COMPILER(html "Enable 
compiler for HTML Documentation" OFF) +THRIFT_ADD_COMPILER(js "Enable compiler for JavaScript" OFF) +THRIFT_ADD_COMPILER(json "Enable compiler for JSON" OFF) +THRIFT_ADD_COMPILER(javame "Enable compiler for Java ME" OFF) +THRIFT_ADD_COMPILER(delphi "Enable compiler for Delphi" OFF) +THRIFT_ADD_COMPILER(go "Enable compiler for Go" OFF) +THRIFT_ADD_COMPILER(d "Enable compiler for D" OFF) +THRIFT_ADD_COMPILER(lua "Enable compiler for Lua" OFF) +THRIFT_ADD_COMPILER(gv "Enable compiler for GraphViz" OFF) +THRIFT_ADD_COMPILER(rs "Enable compiler for Rust" OFF) +THRIFT_ADD_COMPILER(xml "Enable compiler for XML" OFF) + +# Thrift is looking for include files in the src directory +# we also add the current binary directory for generated files +include_directories(${CMAKE_CURRENT_BINARY_DIR} ${THRIFT_COMPILER_SOURCE_DIR}/src ${CMAKE_CURRENT_SOURCE_DIR}/catch) + +add_library(thrift_compiler ${thrift_compiler_SOURCES}) + +#link parse lib to thrift_compiler lib +target_link_libraries(thrift_compiler parse) + +# add tests executable +add_executable(thrift_compiler_tests ${thrift_compiler_tests_manual_SOURCES} ${thrift_compiler_tests_SOURCES}) + +# if generates for Visual Studio set thrift_compiler_tests as default project +if(MSVC) + set_property(TARGET thrift_compiler_tests PROPERTY VS_STARTUP_PROJECT thrift_compiler_tests) +endif() + +set_target_properties(thrift_compiler_tests PROPERTIES RUNTIME_OUTPUT_DIRECTORY bin/) +set_target_properties(thrift_compiler_tests PROPERTIES OUTPUT_NAME thrift_compiler_tests) + +target_link_libraries(thrift_compiler_tests thrift_compiler) + +enable_testing() +add_test(NAME ThriftTests COMMAND thrift_compiler_tests) \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/README.md b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/README.md new file mode 100644 index 000000000..27be491cb --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/README.md @@ -0,0 +1,88 @@ +# Build and run 
compiler tests using CMake + + + +- [Build and run compiler tests using CMake](#build-and-run-compiler-tests-using-cmake) + - [General information](#general-information) + - [How to add your tests](#how-to-add-your-tests) + - [Build and run tests on Unix-like systems](#build-and-run-tests-on-unix-like-systems) + - [Prerequisites:](#prerequisites) + - [Build and run test with CMake](#build-and-run-test-with-cmake) + - [Build and run tests on Windows](#build-and-run-tests-on-windows) + - [Prerequisites:](#prerequisites-1) + - [Generation of VS project with CMake, build and run on Windows](#generation-of-vs-project-with-cmake-build-and-run-on-windows) + + + +## General information + +Added generic way to cover code by tests for many languages (you just need to make a correct header file for generator for your language - example in **netcore** implementation) + +At current moment these tests use free Catch library (https://github.com/catchorg/Catch2/tree/Catch1.x) for easy test creation and usage. 
+Decision to use it was because of simplicity, easy usage, one header file to use, stable community and growing interest (https://cpp.libhunt.com/project/googletest-google/vs/catch?rel=cmp-cmp) + +Also, maybe, later it will be migrated to Catch2 (https://github.com/philsquared/Catch) - depends on need to support legacy compilers (c++98) + +## How to add your tests + +- Open **CMakeLists.txt** +- Set **On** to call of **THRIFT_ADD_COMPILER** for your language + +``` cmake +THRIFT_ADD_COMPILER(netcore "Enable compiler for .NET Core" ON) +``` + +- Create folder with name specified in list of languages in **CMakeLists.txt** +- Create tests in folder for your language (with extensions like *.c* - cc, cpp, etc) + - Don't forget to add include of catch.hpp in your test file + ``` C + #include "../catch/catch.hpp" + ``` + +- If you need - add files manually to **thrift_compiler_tests_manual_SOURCES** in **CMakeLists.txt** similar to + +``` cmake +# you can add some files manually there +set(thrift_compiler_tests_manual_SOURCES + # tests file to avoid main in every test file + ${CMAKE_CURRENT_SOURCE_DIR}/tests_main.cc +) +``` + +- Run **cmake** with arguments for your environment and compiler +- Enjoy + +## Build and run tests on Unix-like systems + +### Prerequisites: +- Install CMake - +- Install winflexbison - + +### Build and run test with CMake + +- Run commands in command line in current directory: + +``` +mkdir cmake-vs && cd cmake-vs +cmake .. +cmake --build . +ctest -C Debug -V +``` + +## Build and run tests on Windows + +### Prerequisites: +- Install CMake - +- Install winflexbison - +- Install VS2017 Community Edition - (ensure that you installed workload "Desktop Development with C++" for VS2017) + +### Generation of VS project with CMake, build and run on Windows +- Run commands in command line in current directory (ensure that VS installed): + +``` +mkdir cmake-vs +cd cmake-vs +cmake .. +cmake --build . 
+ctest -C Debug -V +``` \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/catch/catch.hpp b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/catch/catch.hpp new file mode 100644 index 000000000..33d037e55 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/catch/catch.hpp @@ -0,0 +1,11508 @@ +/* + * Catch v1.9.4 + * Generated: 2017-05-16 13:51:55.506519 + * ---------------------------------------------------------- + * This file has been merged from multiple headers. Please don't edit it directly + * Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved. + * + * Distributed under the Boost Software License, Version 1.0. (See accompanying + * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + */ +#ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED +#define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED + +#define TWOBLUECUBES_CATCH_HPP_INCLUDED + +#ifdef __clang__ +# pragma clang system_header +#elif defined __GNUC__ +# pragma GCC system_header +#endif + +// #included from: internal/catch_suppress_warnings.h + +#ifdef __clang__ +# ifdef __ICC // icpc defines the __clang__ macro +# pragma warning(push) +# pragma warning(disable: 161 1682) +# else // __ICC +# pragma clang diagnostic ignored "-Wglobal-constructors" +# pragma clang diagnostic ignored "-Wvariadic-macros" +# pragma clang diagnostic ignored "-Wc99-extensions" +# pragma clang diagnostic ignored "-Wunused-variable" +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wpadded" +# pragma clang diagnostic ignored "-Wc++98-compat" +# pragma clang diagnostic ignored "-Wc++98-compat-pedantic" +# pragma clang diagnostic ignored "-Wswitch-enum" +# pragma clang diagnostic ignored "-Wcovered-switch-default" +# endif +#elif defined __GNUC__ +# pragma GCC diagnostic ignored "-Wvariadic-macros" +# pragma GCC diagnostic ignored "-Wunused-variable" +# pragma GCC diagnostic ignored "-Wparentheses" + +# pragma GCC 
diagnostic push +# pragma GCC diagnostic ignored "-Wpadded" +#endif +#if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER) +# define CATCH_IMPL +#endif + +#ifdef CATCH_IMPL +# ifndef CLARA_CONFIG_MAIN +# define CLARA_CONFIG_MAIN_NOT_DEFINED +# define CLARA_CONFIG_MAIN +# endif +#endif + +// #included from: internal/catch_notimplemented_exception.h +#define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_H_INCLUDED + +// #included from: catch_common.h +#define TWOBLUECUBES_CATCH_COMMON_H_INCLUDED + +// #included from: catch_compiler_capabilities.h +#define TWOBLUECUBES_CATCH_COMPILER_CAPABILITIES_HPP_INCLUDED + +// Detect a number of compiler features - mostly C++11/14 conformance - by compiler +// The following features are defined: +// +// CATCH_CONFIG_CPP11_NULLPTR : is nullptr supported? +// CATCH_CONFIG_CPP11_NOEXCEPT : is noexcept supported? +// CATCH_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods +// CATCH_CONFIG_CPP11_IS_ENUM : std::is_enum is supported? +// CATCH_CONFIG_CPP11_TUPLE : std::tuple is supported +// CATCH_CONFIG_CPP11_LONG_LONG : is long long supported? +// CATCH_CONFIG_CPP11_OVERRIDE : is override supported? +// CATCH_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) +// CATCH_CONFIG_CPP11_SHUFFLE : is std::shuffle supported? +// CATCH_CONFIG_CPP11_TYPE_TRAITS : are type_traits and enable_if supported? + +// CATCH_CONFIG_CPP11_OR_GREATER : Is C++11 supported? + +// CATCH_CONFIG_VARIADIC_MACROS : are variadic macros supported? +// CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported? +// CATCH_CONFIG_WINDOWS_SEH : is Windows SEH supported? +// CATCH_CONFIG_POSIX_SIGNALS : are POSIX signals supported? +// **************** +// Note to maintainers: if new toggles are added please document them +// in configuration.md, too +// **************** + +// In general each macro has a _NO_ form +// (e.g. CATCH_CONFIG_CPP11_NO_NULLPTR) which disables the feature. 
+// Many features, at point of detection, define an _INTERNAL_ macro, so they +// can be combined, en-mass, with the _NO_ forms later. + +// All the C++11 features can be disabled with CATCH_CONFIG_NO_CPP11 + +#ifdef __cplusplus + +# if __cplusplus >= 201103L +# define CATCH_CPP11_OR_GREATER +# endif + +# if __cplusplus >= 201402L +# define CATCH_CPP14_OR_GREATER +# endif + +#endif + +#ifdef __clang__ + +# if __has_feature(cxx_nullptr) +# define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR +# endif + +# if __has_feature(cxx_noexcept) +# define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT +# endif + +# if defined(CATCH_CPP11_OR_GREATER) +# define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + _Pragma( "clang diagnostic push" ) \ + _Pragma( "clang diagnostic ignored \"-Wexit-time-destructors\"" ) +# define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ + _Pragma( "clang diagnostic pop" ) + +# define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ + _Pragma( "clang diagnostic push" ) \ + _Pragma( "clang diagnostic ignored \"-Wparentheses\"" ) +# define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ + _Pragma( "clang diagnostic pop" ) +# endif + +#endif // __clang__ + +//////////////////////////////////////////////////////////////////////////////// +// We know some environments not to support full POSIX signals +#if defined(__CYGWIN__) || defined(__QNX__) + +# if !defined(CATCH_CONFIG_POSIX_SIGNALS) +# define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS +# endif + +#endif + +//////////////////////////////////////////////////////////////////////////////// +// Cygwin +#ifdef __CYGWIN__ + +// Required for some versions of Cygwin to declare gettimeofday +// see: http://stackoverflow.com/questions/36901803/gettimeofday-not-declared-in-this-scope-cygwin +# define _BSD_SOURCE + +#endif // __CYGWIN__ + +//////////////////////////////////////////////////////////////////////////////// +// Borland +#ifdef __BORLANDC__ + +#endif // __BORLANDC__ + 
+//////////////////////////////////////////////////////////////////////////////// +// EDG +#ifdef __EDG_VERSION__ + +#endif // __EDG_VERSION__ + +//////////////////////////////////////////////////////////////////////////////// +// Digital Mars +#ifdef __DMC__ + +#endif // __DMC__ + +//////////////////////////////////////////////////////////////////////////////// +// GCC +#ifdef __GNUC__ + +# if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) +# define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR +# endif + +// - otherwise more recent versions define __cplusplus >= 201103L +// and will get picked up below + +#endif // __GNUC__ + +//////////////////////////////////////////////////////////////////////////////// +// Visual C++ +#ifdef _MSC_VER + +#define CATCH_INTERNAL_CONFIG_WINDOWS_SEH + +#if (_MSC_VER >= 1600) +# define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR +# define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR +#endif + +#if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) +#define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT +#define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS +#define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE +#define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS +#endif + +#endif // _MSC_VER + +//////////////////////////////////////////////////////////////////////////////// + +// Use variadic macros if the compiler supports them +#if ( defined _MSC_VER && _MSC_VER > 1400 && !defined __EDGE__) || \ + ( defined __WAVE__ && __WAVE_HAS_VARIADICS ) || \ + ( defined __GNUC__ && __GNUC__ >= 3 ) || \ + ( !defined __cplusplus && __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L ) + +#define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS + +#endif + +// Use __COUNTER__ if the compiler supports it +#if ( defined _MSC_VER && _MSC_VER >= 1300 ) || \ + ( defined __GNUC__ && __GNUC__ >= 4 && __GNUC_MINOR__ >= 3 ) || \ + ( defined __clang__ && __clang_major__ >= 3 ) + +#define CATCH_INTERNAL_CONFIG_COUNTER + +#endif + 
+//////////////////////////////////////////////////////////////////////////////// +// C++ language feature support + +// catch all support for C++11 +#if defined(CATCH_CPP11_OR_GREATER) + +# if !defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) +# define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR +# endif + +# ifndef CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT +# define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT +# endif + +# ifndef CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS +# define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS +# endif + +# ifndef CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM +# define CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM +# endif + +# ifndef CATCH_INTERNAL_CONFIG_CPP11_TUPLE +# define CATCH_INTERNAL_CONFIG_CPP11_TUPLE +# endif + +# ifndef CATCH_INTERNAL_CONFIG_VARIADIC_MACROS +# define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS +# endif + +# if !defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) +# define CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG +# endif + +# if !defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) +# define CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE +# endif +# if !defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) +# define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR +# endif +# if !defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) +# define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE +# endif +# if !defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) +# define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS +# endif + +#endif // __cplusplus >= 201103L + +// Now set the actual defines based on the above + anything the user has configured +#if defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NO_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_NULLPTR +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_NOEXCEPT +#endif +#if 
defined(CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_GENERATED_METHODS +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_NO_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_IS_ENUM +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_CPP11_NO_TUPLE) && !defined(CATCH_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_TUPLE +#endif +#if defined(CATCH_INTERNAL_CONFIG_VARIADIC_MACROS) && !defined(CATCH_CONFIG_NO_VARIADIC_MACROS) && !defined(CATCH_CONFIG_VARIADIC_MACROS) +# define CATCH_CONFIG_VARIADIC_MACROS +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_NO_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_LONG_LONG +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_NO_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_OVERRIDE +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_NO_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_UNIQUE_PTR +#endif +// Use of __COUNTER__ is suppressed if __JETBRAINS_IDE__ is #defined (meaning we're being parsed by a JetBrains IDE for +// analytics) because, at time of writing, __COUNTER__ is not properly handled by it. 
+// This does not affect compilation +#if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER) && !defined(__JETBRAINS_IDE__) +# define CATCH_CONFIG_COUNTER +#endif +#if defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_NO_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_SHUFFLE +#endif +# if defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_NO_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_NO_CPP11) +# define CATCH_CONFIG_CPP11_TYPE_TRAITS +# endif +#if defined(CATCH_INTERNAL_CONFIG_WINDOWS_SEH) && !defined(CATCH_CONFIG_NO_WINDOWS_SEH) && !defined(CATCH_CONFIG_WINDOWS_SEH) +# define CATCH_CONFIG_WINDOWS_SEH +#endif +// This is set by default, because we assume that unix compilers are posix-signal-compatible by default. +#if !defined(CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_POSIX_SIGNALS) +# define CATCH_CONFIG_POSIX_SIGNALS +#endif + +#if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS) +# define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS +# define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS +#endif +#if !defined(CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS) +# define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS +# define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS +#endif + +// noexcept support: +#if defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_NOEXCEPT) +# define CATCH_NOEXCEPT noexcept +# define CATCH_NOEXCEPT_IS(x) noexcept(x) +#else +# define CATCH_NOEXCEPT throw() +# define CATCH_NOEXCEPT_IS(x) +#endif + +// nullptr support +#ifdef CATCH_CONFIG_CPP11_NULLPTR +# define CATCH_NULL nullptr +#else +# define CATCH_NULL NULL +#endif + +// override support +#ifdef CATCH_CONFIG_CPP11_OVERRIDE +# define CATCH_OVERRIDE override +#else +# define CATCH_OVERRIDE +#endif + +// unique_ptr support 
+#ifdef CATCH_CONFIG_CPP11_UNIQUE_PTR +# define CATCH_AUTO_PTR( T ) std::unique_ptr +#else +# define CATCH_AUTO_PTR( T ) std::auto_ptr +#endif + +#define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line +#define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) +#ifdef CATCH_CONFIG_COUNTER +# define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ ) +#else +# define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ ) +#endif + +#define INTERNAL_CATCH_STRINGIFY2( expr ) #expr +#define INTERNAL_CATCH_STRINGIFY( expr ) INTERNAL_CATCH_STRINGIFY2( expr ) + +#include +#include + +namespace Catch { + + struct IConfig; + + struct CaseSensitive { enum Choice { + Yes, + No + }; }; + + class NonCopyable { +#ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + NonCopyable( NonCopyable const& ) = delete; + NonCopyable( NonCopyable && ) = delete; + NonCopyable& operator = ( NonCopyable const& ) = delete; + NonCopyable& operator = ( NonCopyable && ) = delete; +#else + NonCopyable( NonCopyable const& info ); + NonCopyable& operator = ( NonCopyable const& ); +#endif + + protected: + NonCopyable() {} + virtual ~NonCopyable(); + }; + + class SafeBool { + public: + typedef void (SafeBool::*type)() const; + + static type makeSafe( bool value ) { + return value ? 
&SafeBool::trueValue : 0; + } + private: + void trueValue() const {} + }; + + template + inline void deleteAll( ContainerT& container ) { + typename ContainerT::const_iterator it = container.begin(); + typename ContainerT::const_iterator itEnd = container.end(); + for(; it != itEnd; ++it ) + delete *it; + } + template + inline void deleteAllValues( AssociativeContainerT& container ) { + typename AssociativeContainerT::const_iterator it = container.begin(); + typename AssociativeContainerT::const_iterator itEnd = container.end(); + for(; it != itEnd; ++it ) + delete it->second; + } + + bool startsWith( std::string const& s, std::string const& prefix ); + bool startsWith( std::string const& s, char prefix ); + bool endsWith( std::string const& s, std::string const& suffix ); + bool endsWith( std::string const& s, char suffix ); + bool contains( std::string const& s, std::string const& infix ); + void toLowerInPlace( std::string& s ); + std::string toLower( std::string const& s ); + std::string trim( std::string const& str ); + bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ); + + struct pluralise { + pluralise( std::size_t count, std::string const& label ); + + friend std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ); + + std::size_t m_count; + std::string m_label; + }; + + struct SourceLineInfo { + + SourceLineInfo(); + SourceLineInfo( char const* _file, std::size_t _line ); +# ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + SourceLineInfo(SourceLineInfo const& other) = default; + SourceLineInfo( SourceLineInfo && ) = default; + SourceLineInfo& operator = ( SourceLineInfo const& ) = default; + SourceLineInfo& operator = ( SourceLineInfo && ) = default; +# endif + bool empty() const; + bool operator == ( SourceLineInfo const& other ) const; + bool operator < ( SourceLineInfo const& other ) const; + + char const* file; + std::size_t line; + }; + + std::ostream& operator << ( std::ostream& os, 
SourceLineInfo const& info ); + + // This is just here to avoid compiler warnings with macro constants and boolean literals + inline bool isTrue( bool value ){ return value; } + inline bool alwaysTrue() { return true; } + inline bool alwaysFalse() { return false; } + + void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ); + + void seedRng( IConfig const& config ); + unsigned int rngSeed(); + + // Use this in variadic streaming macros to allow + // >> +StreamEndStop + // as well as + // >> stuff +StreamEndStop + struct StreamEndStop { + std::string operator+() { + return std::string(); + } + }; + template + T const& operator + ( T const& value, StreamEndStop ) { + return value; + } +} + +#define CATCH_INTERNAL_LINEINFO ::Catch::SourceLineInfo( __FILE__, static_cast( __LINE__ ) ) +#define CATCH_INTERNAL_ERROR( msg ) ::Catch::throwLogicError( msg, CATCH_INTERNAL_LINEINFO ); + +namespace Catch { + + class NotImplementedException : public std::exception + { + public: + NotImplementedException( SourceLineInfo const& lineInfo ); + NotImplementedException( NotImplementedException const& ) {} + + virtual ~NotImplementedException() CATCH_NOEXCEPT {} + + virtual const char* what() const CATCH_NOEXCEPT; + + private: + std::string m_what; + SourceLineInfo m_lineInfo; + }; + +} // end namespace Catch + +/////////////////////////////////////////////////////////////////////////////// +#define CATCH_NOT_IMPLEMENTED throw Catch::NotImplementedException( CATCH_INTERNAL_LINEINFO ) + +// #included from: internal/catch_context.h +#define TWOBLUECUBES_CATCH_CONTEXT_H_INCLUDED + +// #included from: catch_interfaces_generators.h +#define TWOBLUECUBES_CATCH_INTERFACES_GENERATORS_H_INCLUDED + +#include + +namespace Catch { + + struct IGeneratorInfo { + virtual ~IGeneratorInfo(); + virtual bool moveNext() = 0; + virtual std::size_t getCurrentIndex() const = 0; + }; + + struct IGeneratorsForTest { + virtual ~IGeneratorsForTest(); + + virtual IGeneratorInfo& 
getGeneratorInfo( std::string const& fileInfo, std::size_t size ) = 0; + virtual bool moveNext() = 0; + }; + + IGeneratorsForTest* createGeneratorsForTest(); + +} // end namespace Catch + +// #included from: catch_ptr.hpp +#define TWOBLUECUBES_CATCH_PTR_HPP_INCLUDED + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpadded" +#endif + +namespace Catch { + + // An intrusive reference counting smart pointer. + // T must implement addRef() and release() methods + // typically implementing the IShared interface + template + class Ptr { + public: + Ptr() : m_p( CATCH_NULL ){} + Ptr( T* p ) : m_p( p ){ + if( m_p ) + m_p->addRef(); + } + Ptr( Ptr const& other ) : m_p( other.m_p ){ + if( m_p ) + m_p->addRef(); + } + ~Ptr(){ + if( m_p ) + m_p->release(); + } + void reset() { + if( m_p ) + m_p->release(); + m_p = CATCH_NULL; + } + Ptr& operator = ( T* p ){ + Ptr temp( p ); + swap( temp ); + return *this; + } + Ptr& operator = ( Ptr const& other ){ + Ptr temp( other ); + swap( temp ); + return *this; + } + void swap( Ptr& other ) { std::swap( m_p, other.m_p ); } + T* get() const{ return m_p; } + T& operator*() const { return *m_p; } + T* operator->() const { return m_p; } + bool operator !() const { return m_p == CATCH_NULL; } + operator SafeBool::type() const { return SafeBool::makeSafe( m_p != CATCH_NULL ); } + + private: + T* m_p; + }; + + struct IShared : NonCopyable { + virtual ~IShared(); + virtual void addRef() const = 0; + virtual void release() const = 0; + }; + + template + struct SharedImpl : T { + + SharedImpl() : m_rc( 0 ){} + + virtual void addRef() const { + ++m_rc; + } + virtual void release() const { + if( --m_rc == 0 ) + delete this; + } + + mutable unsigned int m_rc; + }; + +} // end namespace Catch + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +namespace Catch { + + class TestCase; + class Stream; + struct IResultCapture; + struct IRunner; + struct IGeneratorsForTest; + struct IConfig; + + struct IContext + 
{ + virtual ~IContext(); + + virtual IResultCapture* getResultCapture() = 0; + virtual IRunner* getRunner() = 0; + virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) = 0; + virtual bool advanceGeneratorsForCurrentTest() = 0; + virtual Ptr getConfig() const = 0; + }; + + struct IMutableContext : IContext + { + virtual ~IMutableContext(); + virtual void setResultCapture( IResultCapture* resultCapture ) = 0; + virtual void setRunner( IRunner* runner ) = 0; + virtual void setConfig( Ptr const& config ) = 0; + }; + + IContext& getCurrentContext(); + IMutableContext& getCurrentMutableContext(); + void cleanUpContext(); + Stream createStream( std::string const& streamName ); + +} + +// #included from: internal/catch_test_registry.hpp +#define TWOBLUECUBES_CATCH_TEST_REGISTRY_HPP_INCLUDED + +// #included from: catch_interfaces_testcase.h +#define TWOBLUECUBES_CATCH_INTERFACES_TESTCASE_H_INCLUDED + +#include + +namespace Catch { + + class TestSpec; + + struct ITestCase : IShared { + virtual void invoke () const = 0; + protected: + virtual ~ITestCase(); + }; + + class TestCase; + struct IConfig; + + struct ITestCaseRegistry { + virtual ~ITestCaseRegistry(); + virtual std::vector const& getAllTests() const = 0; + virtual std::vector const& getAllTestsSorted( IConfig const& config ) const = 0; + }; + + bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ); + std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config ); + std::vector const& getAllTestCasesSorted( IConfig const& config ); + +} + +namespace Catch { + +template +class MethodTestCase : public SharedImpl { + +public: + MethodTestCase( void (C::*method)() ) : m_method( method ) {} + + virtual void invoke() const { + C obj; + (obj.*m_method)(); + } + +private: + virtual ~MethodTestCase() {} + + void (C::*m_method)(); +}; + +typedef void(*TestFunction)(); + +struct NameAndDesc { + NameAndDesc( const char* 
_name = "", const char* _description= "" ) + : name( _name ), description( _description ) + {} + + const char* name; + const char* description; +}; + +void registerTestCase + ( ITestCase* testCase, + char const* className, + NameAndDesc const& nameAndDesc, + SourceLineInfo const& lineInfo ); + +struct AutoReg { + + AutoReg + ( TestFunction function, + SourceLineInfo const& lineInfo, + NameAndDesc const& nameAndDesc ); + + template + AutoReg + ( void (C::*method)(), + char const* className, + NameAndDesc const& nameAndDesc, + SourceLineInfo const& lineInfo ) { + + registerTestCase + ( new MethodTestCase( method ), + className, + nameAndDesc, + lineInfo ); + } + + ~AutoReg(); + +private: + AutoReg( AutoReg const& ); + void operator= ( AutoReg const& ); +}; + +void registerTestCaseFunction + ( TestFunction function, + SourceLineInfo const& lineInfo, + NameAndDesc const& nameAndDesc ); + +} // end namespace Catch + +#ifdef CATCH_CONFIG_VARIADIC_MACROS + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_TESTCASE2( TestName, ... ) \ + static void TestName(); \ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); } \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ + static void TestName() + #define INTERNAL_CATCH_TESTCASE( ... ) \ + INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), __VA_ARGS__ ) + + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, ... 
) \ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); } \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS + + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestName, ClassName, ... )\ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + namespace{ \ + struct TestName : ClassName{ \ + void test(); \ + }; \ + Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestName::test, #ClassName, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); \ + } \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ + void TestName::test() + #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, ... ) \ + INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, __VA_ARGS__ ) + + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, ... 
) \ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS + +#else + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_TESTCASE2( TestName, Name, Desc ) \ + static void TestName(); \ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); }\ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ + static void TestName() + #define INTERNAL_CATCH_TESTCASE( Name, Desc ) \ + INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), Name, Desc ) + + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, Name, Desc ) \ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( Name, Desc ), CATCH_INTERNAL_LINEINFO ); } \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS + + /////////////////////////////////////////////////////////////////////////////// + #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestCaseName, ClassName, TestName, Desc )\ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + namespace{ \ + struct TestCaseName : ClassName{ \ + void test(); \ + }; \ + Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestCaseName::test, #ClassName, Catch::NameAndDesc( TestName, Desc ), CATCH_INTERNAL_LINEINFO ); \ + } \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ + void TestCaseName::test() + #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, TestName, Desc )\ + INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, TestName, Desc ) + + /////////////////////////////////////////////////////////////////////////////// + 
#define INTERNAL_CATCH_REGISTER_TESTCASE( Function, Name, Desc ) \ + CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ + Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); \ + CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS + +#endif + +// #included from: internal/catch_capture.hpp +#define TWOBLUECUBES_CATCH_CAPTURE_HPP_INCLUDED + +// #included from: catch_result_builder.h +#define TWOBLUECUBES_CATCH_RESULT_BUILDER_H_INCLUDED + +// #included from: catch_result_type.h +#define TWOBLUECUBES_CATCH_RESULT_TYPE_H_INCLUDED + +namespace Catch { + + // ResultWas::OfType enum + struct ResultWas { enum OfType { + Unknown = -1, + Ok = 0, + Info = 1, + Warning = 2, + + FailureBit = 0x10, + + ExpressionFailed = FailureBit | 1, + ExplicitFailure = FailureBit | 2, + + Exception = 0x100 | FailureBit, + + ThrewException = Exception | 1, + DidntThrowException = Exception | 2, + + FatalErrorCondition = 0x200 | FailureBit + + }; }; + + inline bool isOk( ResultWas::OfType resultType ) { + return ( resultType & ResultWas::FailureBit ) == 0; + } + inline bool isJustInfo( int flags ) { + return flags == ResultWas::Info; + } + + // ResultDisposition::Flags enum + struct ResultDisposition { enum Flags { + Normal = 0x01, + + ContinueOnFailure = 0x02, // Failures fail test, but execution continues + FalseTest = 0x04, // Prefix expression with ! 
+ SuppressFail = 0x08 // Failures are reported but do not fail the test + }; }; + + inline ResultDisposition::Flags operator | ( ResultDisposition::Flags lhs, ResultDisposition::Flags rhs ) { + return static_cast( static_cast( lhs ) | static_cast( rhs ) ); + } + + inline bool shouldContinueOnFailure( int flags ) { return ( flags & ResultDisposition::ContinueOnFailure ) != 0; } + inline bool isFalseTest( int flags ) { return ( flags & ResultDisposition::FalseTest ) != 0; } + inline bool shouldSuppressFailure( int flags ) { return ( flags & ResultDisposition::SuppressFail ) != 0; } + +} // end namespace Catch + +// #included from: catch_assertionresult.h +#define TWOBLUECUBES_CATCH_ASSERTIONRESULT_H_INCLUDED + +#include + +namespace Catch { + + struct STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison; + + struct DecomposedExpression + { + virtual ~DecomposedExpression() {} + virtual bool isBinaryExpression() const { + return false; + } + virtual void reconstructExpression( std::string& dest ) const = 0; + + // Only simple binary comparisons can be decomposed. + // If more complex check is required then wrap sub-expressions in parentheses. 
+ template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator + ( T const& ); + template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator - ( T const& ); + template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator * ( T const& ); + template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator / ( T const& ); + template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator % ( T const& ); + template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator && ( T const& ); + template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator || ( T const& ); + + private: + DecomposedExpression& operator = (DecomposedExpression const&); + }; + + struct AssertionInfo + { + AssertionInfo() {} + AssertionInfo( std::string const& _macroName, + SourceLineInfo const& _lineInfo, + std::string const& _capturedExpression, + ResultDisposition::Flags _resultDisposition ); + + std::string macroName; + SourceLineInfo lineInfo; + std::string capturedExpression; + ResultDisposition::Flags resultDisposition; + }; + + struct AssertionResultData + { + AssertionResultData() : decomposedExpression( CATCH_NULL ) + , resultType( ResultWas::Unknown ) + , negated( false ) + , parenthesized( false ) {} + + void negate( bool parenthesize ) { + negated = !negated; + parenthesized = parenthesize; + if( resultType == ResultWas::Ok ) + resultType = ResultWas::ExpressionFailed; + else if( resultType == ResultWas::ExpressionFailed ) + resultType = ResultWas::Ok; + } + + std::string const& reconstructExpression() const { + if( decomposedExpression != CATCH_NULL ) { + decomposedExpression->reconstructExpression( reconstructedExpression ); + if( parenthesized ) { + reconstructedExpression.insert( 0, 1, '(' ); + reconstructedExpression.append( 1, ')' ); + } + if( negated ) { + 
reconstructedExpression.insert( 0, 1, '!' ); + } + decomposedExpression = CATCH_NULL; + } + return reconstructedExpression; + } + + mutable DecomposedExpression const* decomposedExpression; + mutable std::string reconstructedExpression; + std::string message; + ResultWas::OfType resultType; + bool negated; + bool parenthesized; + }; + + class AssertionResult { + public: + AssertionResult(); + AssertionResult( AssertionInfo const& info, AssertionResultData const& data ); + ~AssertionResult(); +# ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + AssertionResult( AssertionResult const& ) = default; + AssertionResult( AssertionResult && ) = default; + AssertionResult& operator = ( AssertionResult const& ) = default; + AssertionResult& operator = ( AssertionResult && ) = default; +# endif + + bool isOk() const; + bool succeeded() const; + ResultWas::OfType getResultType() const; + bool hasExpression() const; + bool hasMessage() const; + std::string getExpression() const; + std::string getExpressionInMacro() const; + bool hasExpandedExpression() const; + std::string getExpandedExpression() const; + std::string getMessage() const; + SourceLineInfo getSourceInfo() const; + std::string getTestMacroName() const; + void discardDecomposedExpression() const; + void expandDecomposedExpression() const; + + protected: + AssertionInfo m_info; + AssertionResultData m_resultData; + }; + +} // end namespace Catch + +// #included from: catch_matchers.hpp +#define TWOBLUECUBES_CATCH_MATCHERS_HPP_INCLUDED + +namespace Catch { +namespace Matchers { + namespace Impl { + + template struct MatchAllOf; + template struct MatchAnyOf; + template struct MatchNotOf; + + class MatcherUntypedBase { + public: + std::string toString() const { + if( m_cachedToString.empty() ) + m_cachedToString = describe(); + return m_cachedToString; + } + + protected: + virtual ~MatcherUntypedBase(); + virtual std::string describe() const = 0; + mutable std::string m_cachedToString; + private: + MatcherUntypedBase& 
operator = ( MatcherUntypedBase const& ); + }; + + template + struct MatcherMethod { + virtual bool match( ObjectT const& arg ) const = 0; + }; + template + struct MatcherMethod { + virtual bool match( PtrT* arg ) const = 0; + }; + + template + struct MatcherBase : MatcherUntypedBase, MatcherMethod { + + MatchAllOf operator && ( MatcherBase const& other ) const; + MatchAnyOf operator || ( MatcherBase const& other ) const; + MatchNotOf operator ! () const; + }; + + template + struct MatchAllOf : MatcherBase { + virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { + for( std::size_t i = 0; i < m_matchers.size(); ++i ) { + if (!m_matchers[i]->match(arg)) + return false; + } + return true; + } + virtual std::string describe() const CATCH_OVERRIDE { + std::string description; + description.reserve( 4 + m_matchers.size()*32 ); + description += "( "; + for( std::size_t i = 0; i < m_matchers.size(); ++i ) { + if( i != 0 ) + description += " and "; + description += m_matchers[i]->toString(); + } + description += " )"; + return description; + } + + MatchAllOf& operator && ( MatcherBase const& other ) { + m_matchers.push_back( &other ); + return *this; + } + + std::vector const*> m_matchers; + }; + template + struct MatchAnyOf : MatcherBase { + + virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { + for( std::size_t i = 0; i < m_matchers.size(); ++i ) { + if (m_matchers[i]->match(arg)) + return true; + } + return false; + } + virtual std::string describe() const CATCH_OVERRIDE { + std::string description; + description.reserve( 4 + m_matchers.size()*32 ); + description += "( "; + for( std::size_t i = 0; i < m_matchers.size(); ++i ) { + if( i != 0 ) + description += " or "; + description += m_matchers[i]->toString(); + } + description += " )"; + return description; + } + + MatchAnyOf& operator || ( MatcherBase const& other ) { + m_matchers.push_back( &other ); + return *this; + } + + std::vector const*> m_matchers; + }; + + template + struct MatchNotOf : 
MatcherBase { + + MatchNotOf( MatcherBase const& underlyingMatcher ) : m_underlyingMatcher( underlyingMatcher ) {} + + virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { + return !m_underlyingMatcher.match( arg ); + } + + virtual std::string describe() const CATCH_OVERRIDE { + return "not " + m_underlyingMatcher.toString(); + } + MatcherBase const& m_underlyingMatcher; + }; + + template + MatchAllOf MatcherBase::operator && ( MatcherBase const& other ) const { + return MatchAllOf() && *this && other; + } + template + MatchAnyOf MatcherBase::operator || ( MatcherBase const& other ) const { + return MatchAnyOf() || *this || other; + } + template + MatchNotOf MatcherBase::operator ! () const { + return MatchNotOf( *this ); + } + + } // namespace Impl + + // The following functions create the actual matcher objects. + // This allows the types to be inferred + // - deprecated: prefer ||, && and ! + template + inline Impl::MatchNotOf Not( Impl::MatcherBase const& underlyingMatcher ) { + return Impl::MatchNotOf( underlyingMatcher ); + } + template + inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { + return Impl::MatchAllOf() && m1 && m2; + } + template + inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { + return Impl::MatchAllOf() && m1 && m2 && m3; + } + template + inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { + return Impl::MatchAnyOf() || m1 || m2; + } + template + inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { + return Impl::MatchAnyOf() || m1 || m2 || m3; + } + +} // namespace Matchers + +using namespace Matchers; +using Matchers::Impl::MatcherBase; + +} // namespace Catch + +namespace Catch { + + struct TestFailureException{}; + + template class ExpressionLhs; + + struct CopyableStream { + CopyableStream() {} + CopyableStream( 
CopyableStream const& other ) { + oss << other.oss.str(); + } + CopyableStream& operator=( CopyableStream const& other ) { + oss.str(std::string()); + oss << other.oss.str(); + return *this; + } + std::ostringstream oss; + }; + + class ResultBuilder : public DecomposedExpression { + public: + ResultBuilder( char const* macroName, + SourceLineInfo const& lineInfo, + char const* capturedExpression, + ResultDisposition::Flags resultDisposition, + char const* secondArg = "" ); + ~ResultBuilder(); + + template + ExpressionLhs operator <= ( T const& operand ); + ExpressionLhs operator <= ( bool value ); + + template + ResultBuilder& operator << ( T const& value ) { + m_stream.oss << value; + return *this; + } + + ResultBuilder& setResultType( ResultWas::OfType result ); + ResultBuilder& setResultType( bool result ); + + void endExpression( DecomposedExpression const& expr ); + + virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE; + + AssertionResult build() const; + AssertionResult build( DecomposedExpression const& expr ) const; + + void useActiveException( ResultDisposition::Flags resultDisposition = ResultDisposition::Normal ); + void captureResult( ResultWas::OfType resultType ); + void captureExpression(); + void captureExpectedException( std::string const& expectedMessage ); + void captureExpectedException( Matchers::Impl::MatcherBase const& matcher ); + void handleResult( AssertionResult const& result ); + void react(); + bool shouldDebugBreak() const; + bool allowThrows() const; + + template + void captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ); + + void setExceptionGuard(); + void unsetExceptionGuard(); + + private: + AssertionInfo m_assertionInfo; + AssertionResultData m_data; + CopyableStream m_stream; + + bool m_shouldDebugBreak; + bool m_shouldThrow; + bool m_guardException; + }; + +} // namespace Catch + +// Include after due to circular dependency: +// #included from: catch_expression_lhs.hpp 
+#define TWOBLUECUBES_CATCH_EXPRESSION_LHS_HPP_INCLUDED + +// #included from: catch_evaluate.hpp +#define TWOBLUECUBES_CATCH_EVALUATE_HPP_INCLUDED + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable:4389) // '==' : signed/unsigned mismatch +#pragma warning(disable:4312) // Converting int to T* using reinterpret_cast (issue on x64 platform) +#endif + +#include + +namespace Catch { +namespace Internal { + + enum Operator { + IsEqualTo, + IsNotEqualTo, + IsLessThan, + IsGreaterThan, + IsLessThanOrEqualTo, + IsGreaterThanOrEqualTo + }; + + template struct OperatorTraits { static const char* getName(){ return "*error*"; } }; + template<> struct OperatorTraits { static const char* getName(){ return "=="; } }; + template<> struct OperatorTraits { static const char* getName(){ return "!="; } }; + template<> struct OperatorTraits { static const char* getName(){ return "<"; } }; + template<> struct OperatorTraits { static const char* getName(){ return ">"; } }; + template<> struct OperatorTraits { static const char* getName(){ return "<="; } }; + template<> struct OperatorTraits{ static const char* getName(){ return ">="; } }; + + template + inline T& opCast(T const& t) { return const_cast(t); } + +// nullptr_t support based on pull request #154 from Konstantin Baumann +#ifdef CATCH_CONFIG_CPP11_NULLPTR + inline std::nullptr_t opCast(std::nullptr_t) { return nullptr; } +#endif // CATCH_CONFIG_CPP11_NULLPTR + + // So the compare overloads can be operator agnostic we convey the operator as a template + // enum, which is used to specialise an Evaluator for doing the comparison. 
+ template + class Evaluator{}; + + template + struct Evaluator { + static bool evaluate( T1 const& lhs, T2 const& rhs) { + return bool( opCast( lhs ) == opCast( rhs ) ); + } + }; + template + struct Evaluator { + static bool evaluate( T1 const& lhs, T2 const& rhs ) { + return bool( opCast( lhs ) != opCast( rhs ) ); + } + }; + template + struct Evaluator { + static bool evaluate( T1 const& lhs, T2 const& rhs ) { + return bool( opCast( lhs ) < opCast( rhs ) ); + } + }; + template + struct Evaluator { + static bool evaluate( T1 const& lhs, T2 const& rhs ) { + return bool( opCast( lhs ) > opCast( rhs ) ); + } + }; + template + struct Evaluator { + static bool evaluate( T1 const& lhs, T2 const& rhs ) { + return bool( opCast( lhs ) >= opCast( rhs ) ); + } + }; + template + struct Evaluator { + static bool evaluate( T1 const& lhs, T2 const& rhs ) { + return bool( opCast( lhs ) <= opCast( rhs ) ); + } + }; + + template + bool applyEvaluator( T1 const& lhs, T2 const& rhs ) { + return Evaluator::evaluate( lhs, rhs ); + } + + // This level of indirection allows us to specialise for integer types + // to avoid signed/ unsigned warnings + + // "base" overload + template + bool compare( T1 const& lhs, T2 const& rhs ) { + return Evaluator::evaluate( lhs, rhs ); + } + + // unsigned X to int + template bool compare( unsigned int lhs, int rhs ) { + return applyEvaluator( lhs, static_cast( rhs ) ); + } + template bool compare( unsigned long lhs, int rhs ) { + return applyEvaluator( lhs, static_cast( rhs ) ); + } + template bool compare( unsigned char lhs, int rhs ) { + return applyEvaluator( lhs, static_cast( rhs ) ); + } + + // unsigned X to long + template bool compare( unsigned int lhs, long rhs ) { + return applyEvaluator( lhs, static_cast( rhs ) ); + } + template bool compare( unsigned long lhs, long rhs ) { + return applyEvaluator( lhs, static_cast( rhs ) ); + } + template bool compare( unsigned char lhs, long rhs ) { + return applyEvaluator( lhs, static_cast( rhs ) ); + } + + 
// int to unsigned X + template bool compare( int lhs, unsigned int rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( int lhs, unsigned long rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( int lhs, unsigned char rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + + // long to unsigned X + template bool compare( long lhs, unsigned int rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( long lhs, unsigned long rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( long lhs, unsigned char rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + + // pointer to long (when comparing against NULL) + template bool compare( long lhs, T* rhs ) { + return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); + } + template bool compare( T* lhs, long rhs ) { + return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); + } + + // pointer to int (when comparing against NULL) + template bool compare( int lhs, T* rhs ) { + return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); + } + template bool compare( T* lhs, int rhs ) { + return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); + } + +#ifdef CATCH_CONFIG_CPP11_LONG_LONG + // long long to unsigned X + template bool compare( long long lhs, unsigned int rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( long long lhs, unsigned long rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( long long lhs, unsigned long long rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( long long lhs, unsigned char rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + + // unsigned long long to X + template bool compare( unsigned long long lhs, int rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( unsigned 
long long lhs, long rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( unsigned long long lhs, long long rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + template bool compare( unsigned long long lhs, char rhs ) { + return applyEvaluator( static_cast( lhs ), rhs ); + } + + // pointer to long long (when comparing against NULL) + template bool compare( long long lhs, T* rhs ) { + return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); + } + template bool compare( T* lhs, long long rhs ) { + return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); + } +#endif // CATCH_CONFIG_CPP11_LONG_LONG + +#ifdef CATCH_CONFIG_CPP11_NULLPTR + // pointer to nullptr_t (when comparing against nullptr) + template bool compare( std::nullptr_t, T* rhs ) { + return Evaluator::evaluate( nullptr, rhs ); + } + template bool compare( T* lhs, std::nullptr_t ) { + return Evaluator::evaluate( lhs, nullptr ); + } +#endif // CATCH_CONFIG_CPP11_NULLPTR + +} // end of namespace Internal +} // end of namespace Catch + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +// #included from: catch_tostring.h +#define TWOBLUECUBES_CATCH_TOSTRING_H_INCLUDED + +#include +#include +#include +#include +#include + +#ifdef __OBJC__ +// #included from: catch_objc_arc.hpp +#define TWOBLUECUBES_CATCH_OBJC_ARC_HPP_INCLUDED + +#import + +#ifdef __has_feature +#define CATCH_ARC_ENABLED __has_feature(objc_arc) +#else +#define CATCH_ARC_ENABLED 0 +#endif + +void arcSafeRelease( NSObject* obj ); +id performOptionalSelector( id obj, SEL sel ); + +#if !CATCH_ARC_ENABLED +inline void arcSafeRelease( NSObject* obj ) { + [obj release]; +} +inline id performOptionalSelector( id obj, SEL sel ) { + if( [obj respondsToSelector: sel] ) + return [obj performSelector: sel]; + return nil; +} +#define CATCH_UNSAFE_UNRETAINED +#define CATCH_ARC_STRONG +#else +inline void arcSafeRelease( NSObject* ){} +inline id performOptionalSelector( id obj, SEL sel ) { +#ifdef __clang__ 
+#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Warc-performSelector-leaks" +#endif + if( [obj respondsToSelector: sel] ) + return [obj performSelector: sel]; +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + return nil; +} +#define CATCH_UNSAFE_UNRETAINED __unsafe_unretained +#define CATCH_ARC_STRONG __strong +#endif + +#endif + +#ifdef CATCH_CONFIG_CPP11_TUPLE +#include +#endif + +#ifdef CATCH_CONFIG_CPP11_IS_ENUM +#include +#endif + +namespace Catch { + +// Why we're here. +template +std::string toString( T const& value ); + +// Built in overloads + +std::string toString( std::string const& value ); +std::string toString( std::wstring const& value ); +std::string toString( const char* const value ); +std::string toString( char* const value ); +std::string toString( const wchar_t* const value ); +std::string toString( wchar_t* const value ); +std::string toString( int value ); +std::string toString( unsigned long value ); +std::string toString( unsigned int value ); +std::string toString( const double value ); +std::string toString( const float value ); +std::string toString( bool value ); +std::string toString( char value ); +std::string toString( signed char value ); +std::string toString( unsigned char value ); + +#ifdef CATCH_CONFIG_CPP11_LONG_LONG +std::string toString( long long value ); +std::string toString( unsigned long long value ); +#endif + +#ifdef CATCH_CONFIG_CPP11_NULLPTR +std::string toString( std::nullptr_t ); +#endif + +#ifdef __OBJC__ + std::string toString( NSString const * const& nsstring ); + std::string toString( NSString * CATCH_ARC_STRONG const& nsstring ); + std::string toString( NSObject* const& nsObject ); +#endif + +namespace Detail { + + extern const std::string unprintableString; + + #if !defined(CATCH_CONFIG_CPP11_STREAM_INSERTABLE_CHECK) + struct BorgType { + template BorgType( T const& ); + }; + + struct TrueType { char sizer[1]; }; + struct FalseType { char sizer[2]; }; + + TrueType& testStreamable( 
std::ostream& ); + FalseType testStreamable( FalseType ); + + FalseType operator<<( std::ostream const&, BorgType const& ); + + template + struct IsStreamInsertable { + static std::ostream &s; + static T const&t; + enum { value = sizeof( testStreamable(s << t) ) == sizeof( TrueType ) }; + }; +#else + template + class IsStreamInsertable { + template + static auto test(int) + -> decltype( std::declval() << std::declval(), std::true_type() ); + + template + static auto test(...) -> std::false_type; + + public: + static const bool value = decltype(test(0))::value; + }; +#endif + +#if defined(CATCH_CONFIG_CPP11_IS_ENUM) + template::value + > + struct EnumStringMaker + { + static std::string convert( T const& ) { return unprintableString; } + }; + + template + struct EnumStringMaker + { + static std::string convert( T const& v ) + { + return ::Catch::toString( + static_cast::type>(v) + ); + } + }; +#endif + template + struct StringMakerBase { +#if defined(CATCH_CONFIG_CPP11_IS_ENUM) + template + static std::string convert( T const& v ) + { + return EnumStringMaker::convert( v ); + } +#else + template + static std::string convert( T const& ) { return unprintableString; } +#endif + }; + + template<> + struct StringMakerBase { + template + static std::string convert( T const& _value ) { + std::ostringstream oss; + oss << _value; + return oss.str(); + } + }; + + std::string rawMemoryToString( const void *object, std::size_t size ); + + template + inline std::string rawMemoryToString( const T& object ) { + return rawMemoryToString( &object, sizeof(object) ); + } + +} // end namespace Detail + +template +struct StringMaker : + Detail::StringMakerBase::value> {}; + +template +struct StringMaker { + template + static std::string convert( U* p ) { + if( !p ) + return "NULL"; + else + return Detail::rawMemoryToString( p ); + } +}; + +template +struct StringMaker { + static std::string convert( R C::* p ) { + if( !p ) + return "NULL"; + else + return Detail::rawMemoryToString( p ); 
+ } +}; + +namespace Detail { + template + std::string rangeToString( InputIterator first, InputIterator last ); +} + +//template +//struct StringMaker > { +// static std::string convert( std::vector const& v ) { +// return Detail::rangeToString( v.begin(), v.end() ); +// } +//}; + +template +std::string toString( std::vector const& v ) { + return Detail::rangeToString( v.begin(), v.end() ); +} + +#ifdef CATCH_CONFIG_CPP11_TUPLE + +// toString for tuples +namespace TupleDetail { + template< + typename Tuple, + std::size_t N = 0, + bool = (N < std::tuple_size::value) + > + struct ElementPrinter { + static void print( const Tuple& tuple, std::ostream& os ) + { + os << ( N ? ", " : " " ) + << Catch::toString(std::get(tuple)); + ElementPrinter::print(tuple,os); + } + }; + + template< + typename Tuple, + std::size_t N + > + struct ElementPrinter { + static void print( const Tuple&, std::ostream& ) {} + }; + +} + +template +struct StringMaker> { + + static std::string convert( const std::tuple& tuple ) + { + std::ostringstream os; + os << '{'; + TupleDetail::ElementPrinter>::print( tuple, os ); + os << " }"; + return os.str(); + } +}; +#endif // CATCH_CONFIG_CPP11_TUPLE + +namespace Detail { + template + std::string makeString( T const& value ) { + return StringMaker::convert( value ); + } +} // end namespace Detail + +/// \brief converts any type to a string +/// +/// The default template forwards on to ostringstream - except when an +/// ostringstream overload does not exist - in which case it attempts to detect +/// that and writes {?}. +/// Overload (not specialise) this template for custom typs that you don't want +/// to provide an ostream overload for. 
+template +std::string toString( T const& value ) { + return StringMaker::convert( value ); +} + + namespace Detail { + template + std::string rangeToString( InputIterator first, InputIterator last ) { + std::ostringstream oss; + oss << "{ "; + if( first != last ) { + oss << Catch::toString( *first ); + for( ++first ; first != last ; ++first ) + oss << ", " << Catch::toString( *first ); + } + oss << " }"; + return oss.str(); + } +} + +} // end namespace Catch + +namespace Catch { + +template +class BinaryExpression; + +template +class MatchExpression; + +// Wraps the LHS of an expression and overloads comparison operators +// for also capturing those and RHS (if any) +template +class ExpressionLhs : public DecomposedExpression { +public: + ExpressionLhs( ResultBuilder& rb, T lhs ) : m_rb( rb ), m_lhs( lhs ), m_truthy(false) {} + + ExpressionLhs& operator = ( const ExpressionLhs& ); + + template + BinaryExpression + operator == ( RhsT const& rhs ) { + return captureExpression( rhs ); + } + + template + BinaryExpression + operator != ( RhsT const& rhs ) { + return captureExpression( rhs ); + } + + template + BinaryExpression + operator < ( RhsT const& rhs ) { + return captureExpression( rhs ); + } + + template + BinaryExpression + operator > ( RhsT const& rhs ) { + return captureExpression( rhs ); + } + + template + BinaryExpression + operator <= ( RhsT const& rhs ) { + return captureExpression( rhs ); + } + + template + BinaryExpression + operator >= ( RhsT const& rhs ) { + return captureExpression( rhs ); + } + + BinaryExpression operator == ( bool rhs ) { + return captureExpression( rhs ); + } + + BinaryExpression operator != ( bool rhs ) { + return captureExpression( rhs ); + } + + void endExpression() { + m_truthy = m_lhs ? 
true : false; + m_rb + .setResultType( m_truthy ) + .endExpression( *this ); + } + + virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { + dest = Catch::toString( m_truthy ); + } + +private: + template + BinaryExpression captureExpression( RhsT& rhs ) const { + return BinaryExpression( m_rb, m_lhs, rhs ); + } + + template + BinaryExpression captureExpression( bool rhs ) const { + return BinaryExpression( m_rb, m_lhs, rhs ); + } + +private: + ResultBuilder& m_rb; + T m_lhs; + bool m_truthy; +}; + +template +class BinaryExpression : public DecomposedExpression { +public: + BinaryExpression( ResultBuilder& rb, LhsT lhs, RhsT rhs ) + : m_rb( rb ), m_lhs( lhs ), m_rhs( rhs ) {} + + BinaryExpression& operator = ( BinaryExpression& ); + + void endExpression() const { + m_rb + .setResultType( Internal::compare( m_lhs, m_rhs ) ) + .endExpression( *this ); + } + + virtual bool isBinaryExpression() const CATCH_OVERRIDE { + return true; + } + + virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { + std::string lhs = Catch::toString( m_lhs ); + std::string rhs = Catch::toString( m_rhs ); + char delim = lhs.size() + rhs.size() < 40 && + lhs.find('\n') == std::string::npos && + rhs.find('\n') == std::string::npos ? 
' ' : '\n'; + dest.reserve( 7 + lhs.size() + rhs.size() ); + // 2 for spaces around operator + // 2 for operator + // 2 for parentheses (conditionally added later) + // 1 for negation (conditionally added later) + dest = lhs; + dest += delim; + dest += Internal::OperatorTraits::getName(); + dest += delim; + dest += rhs; + } + +private: + ResultBuilder& m_rb; + LhsT m_lhs; + RhsT m_rhs; +}; + +template +class MatchExpression : public DecomposedExpression { +public: + MatchExpression( ArgT arg, MatcherT matcher, char const* matcherString ) + : m_arg( arg ), m_matcher( matcher ), m_matcherString( matcherString ) {} + + virtual bool isBinaryExpression() const CATCH_OVERRIDE { + return true; + } + + virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { + std::string matcherAsString = m_matcher.toString(); + dest = Catch::toString( m_arg ); + dest += ' '; + if( matcherAsString == Detail::unprintableString ) + dest += m_matcherString; + else + dest += matcherAsString; + } + +private: + ArgT m_arg; + MatcherT m_matcher; + char const* m_matcherString; +}; + +} // end namespace Catch + + +namespace Catch { + + template + inline ExpressionLhs ResultBuilder::operator <= ( T const& operand ) { + return ExpressionLhs( *this, operand ); + } + + inline ExpressionLhs ResultBuilder::operator <= ( bool value ) { + return ExpressionLhs( *this, value ); + } + + template + inline void ResultBuilder::captureMatch( ArgT const& arg, MatcherT const& matcher, + char const* matcherString ) { + MatchExpression expr( arg, matcher, matcherString ); + setResultType( matcher.match( arg ) ); + endExpression( expr ); + } + +} // namespace Catch + +// #included from: catch_message.h +#define TWOBLUECUBES_CATCH_MESSAGE_H_INCLUDED + +#include + +namespace Catch { + + struct MessageInfo { + MessageInfo( std::string const& _macroName, + SourceLineInfo const& _lineInfo, + ResultWas::OfType _type ); + + std::string macroName; + SourceLineInfo lineInfo; + ResultWas::OfType type; + 
std::string message; + unsigned int sequence; + + bool operator == ( MessageInfo const& other ) const { + return sequence == other.sequence; + } + bool operator < ( MessageInfo const& other ) const { + return sequence < other.sequence; + } + private: + static unsigned int globalCount; + }; + + struct MessageBuilder { + MessageBuilder( std::string const& macroName, + SourceLineInfo const& lineInfo, + ResultWas::OfType type ) + : m_info( macroName, lineInfo, type ) + {} + + template + MessageBuilder& operator << ( T const& value ) { + m_stream << value; + return *this; + } + + MessageInfo m_info; + std::ostringstream m_stream; + }; + + class ScopedMessage { + public: + ScopedMessage( MessageBuilder const& builder ); + ScopedMessage( ScopedMessage const& other ); + ~ScopedMessage(); + + MessageInfo m_info; + }; + +} // end namespace Catch + +// #included from: catch_interfaces_capture.h +#define TWOBLUECUBES_CATCH_INTERFACES_CAPTURE_H_INCLUDED + +#include + +namespace Catch { + + class TestCase; + class AssertionResult; + struct AssertionInfo; + struct SectionInfo; + struct SectionEndInfo; + struct MessageInfo; + class ScopedMessageBuilder; + struct Counts; + + struct IResultCapture { + + virtual ~IResultCapture(); + + virtual void assertionEnded( AssertionResult const& result ) = 0; + virtual bool sectionStarted( SectionInfo const& sectionInfo, + Counts& assertions ) = 0; + virtual void sectionEnded( SectionEndInfo const& endInfo ) = 0; + virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) = 0; + virtual void pushScopedMessage( MessageInfo const& message ) = 0; + virtual void popScopedMessage( MessageInfo const& message ) = 0; + + virtual std::string getCurrentTestName() const = 0; + virtual const AssertionResult* getLastResult() const = 0; + + virtual void exceptionEarlyReported() = 0; + + virtual void handleFatalErrorCondition( std::string const& message ) = 0; + }; + + IResultCapture& getResultCapture(); +} + +// #included from: catch_debugger.h 
+#define TWOBLUECUBES_CATCH_DEBUGGER_H_INCLUDED + +// #included from: catch_platform.h +#define TWOBLUECUBES_CATCH_PLATFORM_H_INCLUDED + +#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) +# define CATCH_PLATFORM_MAC +#elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED) +# define CATCH_PLATFORM_IPHONE +#elif defined(linux) || defined(__linux) || defined(__linux__) +# define CATCH_PLATFORM_LINUX +#elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) +# define CATCH_PLATFORM_WINDOWS +# if !defined(NOMINMAX) && !defined(CATCH_CONFIG_NO_NOMINMAX) +# define CATCH_DEFINES_NOMINMAX +# endif +# if !defined(WIN32_LEAN_AND_MEAN) && !defined(CATCH_CONFIG_NO_WIN32_LEAN_AND_MEAN) +# define CATCH_DEFINES_WIN32_LEAN_AND_MEAN +# endif +#endif + +#include + +namespace Catch{ + + bool isDebuggerActive(); + void writeToDebugConsole( std::string const& text ); +} + +#ifdef CATCH_PLATFORM_MAC + + // The following code snippet based on: + // http://cocoawithlove.com/2008/03/break-into-debugger.html + #if defined(__ppc64__) || defined(__ppc__) + #define CATCH_TRAP() \ + __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n" \ + : : : "memory","r0","r3","r4" ) + #else + #define CATCH_TRAP() __asm__("int $3\n" : : ) + #endif + +#elif defined(CATCH_PLATFORM_LINUX) + // If we can use inline assembler, do it because this allows us to break + // directly at the location of the failing check instead of breaking inside + // raise() called from it, i.e. one stack frame below. + #if defined(__GNUC__) && (defined(__i386) || defined(__x86_64)) + #define CATCH_TRAP() asm volatile ("int $3") + #else // Fall back to the generic way. 
+ #include + + #define CATCH_TRAP() raise(SIGTRAP) + #endif +#elif defined(_MSC_VER) + #define CATCH_TRAP() __debugbreak() +#elif defined(__MINGW32__) + extern "C" __declspec(dllimport) void __stdcall DebugBreak(); + #define CATCH_TRAP() DebugBreak() +#endif + +#ifdef CATCH_TRAP + #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) { CATCH_TRAP(); } +#else + #define CATCH_BREAK_INTO_DEBUGGER() Catch::alwaysTrue(); +#endif + +// #included from: catch_interfaces_runner.h +#define TWOBLUECUBES_CATCH_INTERFACES_RUNNER_H_INCLUDED + +namespace Catch { + class TestCase; + + struct IRunner { + virtual ~IRunner(); + virtual bool aborting() const = 0; + }; +} + +#if defined(CATCH_CONFIG_FAST_COMPILE) +/////////////////////////////////////////////////////////////////////////////// +// We can speedup compilation significantly by breaking into debugger lower in +// the callstack, because then we don't have to expand CATCH_BREAK_INTO_DEBUGGER +// macro in each assertion +#define INTERNAL_CATCH_REACT( resultBuilder ) \ + resultBuilder.react(); + +/////////////////////////////////////////////////////////////////////////////// +// Another way to speed-up compilation is to omit local try-catch for REQUIRE* +// macros. +// This can potentially cause false negative, if the test code catches +// the exception before it propagates back up to the runner. 
+#define INTERNAL_CATCH_TEST_NO_TRY( macroName, resultDisposition, expr ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ + __catchResult.setExceptionGuard(); \ + CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ + ( __catchResult <= expr ).endExpression(); \ + CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ + __catchResult.unsetExceptionGuard(); \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look +// The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. + +#define INTERNAL_CHECK_THAT_NO_TRY( macroName, matcher, resultDisposition, arg ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ + __catchResult.setExceptionGuard(); \ + __catchResult.captureMatch( arg, matcher, #matcher ); \ + __catchResult.unsetExceptionGuard(); \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) + +#else +/////////////////////////////////////////////////////////////////////////////// +// In the event of a failure works out if the debugger needs to be invoked +// and/or an exception thrown and takes appropriate action. 
+// This needs to be done as a macro so the debugger will stop in the user +// source code rather than in Catch library code +#define INTERNAL_CATCH_REACT( resultBuilder ) \ + if( resultBuilder.shouldDebugBreak() ) CATCH_BREAK_INTO_DEBUGGER(); \ + resultBuilder.react(); +#endif + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ + try { \ + CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ + ( __catchResult <= expr ).endExpression(); \ + CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ + } \ + catch( ... ) { \ + __catchResult.useActiveException( resultDisposition ); \ + } \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look + // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. 
+ +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_IF( macroName, resultDisposition, expr ) \ + INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ + if( Catch::getResultCapture().getLastResult()->succeeded() ) + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_ELSE( macroName, resultDisposition, expr ) \ + INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ + if( !Catch::getResultCapture().getLastResult()->succeeded() ) + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_NO_THROW( macroName, resultDisposition, expr ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ + try { \ + static_cast(expr); \ + __catchResult.captureResult( Catch::ResultWas::Ok ); \ + } \ + catch( ... ) { \ + __catchResult.useActiveException( resultDisposition ); \ + } \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_THROWS( macroName, resultDisposition, matcher, expr ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition, #matcher ); \ + if( __catchResult.allowThrows() ) \ + try { \ + static_cast(expr); \ + __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ + } \ + catch( ... 
) { \ + __catchResult.captureExpectedException( matcher ); \ + } \ + else \ + __catchResult.captureResult( Catch::ResultWas::Ok ); \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_THROWS_AS( macroName, exceptionType, resultDisposition, expr ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr ", " #exceptionType, resultDisposition ); \ + if( __catchResult.allowThrows() ) \ + try { \ + static_cast(expr); \ + __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ + } \ + catch( exceptionType ) { \ + __catchResult.captureResult( Catch::ResultWas::Ok ); \ + } \ + catch( ... ) { \ + __catchResult.useActiveException( resultDisposition ); \ + } \ + else \ + __catchResult.captureResult( Catch::ResultWas::Ok ); \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) + +/////////////////////////////////////////////////////////////////////////////// +#ifdef CATCH_CONFIG_VARIADIC_MACROS + #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, ... 
) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ + __catchResult << __VA_ARGS__ + ::Catch::StreamEndStop(); \ + __catchResult.captureResult( messageType ); \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) +#else + #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, log ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ + __catchResult << log + ::Catch::StreamEndStop(); \ + __catchResult.captureResult( messageType ); \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) +#endif + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_INFO( macroName, log ) \ + Catch::ScopedMessage INTERNAL_CATCH_UNIQUE_NAME( scopedMessage ) = Catch::MessageBuilder( macroName, CATCH_INTERNAL_LINEINFO, Catch::ResultWas::Info ) << log; + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CHECK_THAT( macroName, matcher, resultDisposition, arg ) \ + do { \ + Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ + try { \ + __catchResult.captureMatch( arg, matcher, #matcher ); \ + } catch( ... 
) { \ + __catchResult.useActiveException( resultDisposition | Catch::ResultDisposition::ContinueOnFailure ); \ + } \ + INTERNAL_CATCH_REACT( __catchResult ) \ + } while( Catch::alwaysFalse() ) + +// #included from: internal/catch_section.h +#define TWOBLUECUBES_CATCH_SECTION_H_INCLUDED + +// #included from: catch_section_info.h +#define TWOBLUECUBES_CATCH_SECTION_INFO_H_INCLUDED + +// #included from: catch_totals.hpp +#define TWOBLUECUBES_CATCH_TOTALS_HPP_INCLUDED + +#include + +namespace Catch { + + struct Counts { + Counts() : passed( 0 ), failed( 0 ), failedButOk( 0 ) {} + + Counts operator - ( Counts const& other ) const { + Counts diff; + diff.passed = passed - other.passed; + diff.failed = failed - other.failed; + diff.failedButOk = failedButOk - other.failedButOk; + return diff; + } + Counts& operator += ( Counts const& other ) { + passed += other.passed; + failed += other.failed; + failedButOk += other.failedButOk; + return *this; + } + + std::size_t total() const { + return passed + failed + failedButOk; + } + bool allPassed() const { + return failed == 0 && failedButOk == 0; + } + bool allOk() const { + return failed == 0; + } + + std::size_t passed; + std::size_t failed; + std::size_t failedButOk; + }; + + struct Totals { + + Totals operator - ( Totals const& other ) const { + Totals diff; + diff.assertions = assertions - other.assertions; + diff.testCases = testCases - other.testCases; + return diff; + } + + Totals delta( Totals const& prevTotals ) const { + Totals diff = *this - prevTotals; + if( diff.assertions.failed > 0 ) + ++diff.testCases.failed; + else if( diff.assertions.failedButOk > 0 ) + ++diff.testCases.failedButOk; + else + ++diff.testCases.passed; + return diff; + } + + Totals& operator += ( Totals const& other ) { + assertions += other.assertions; + testCases += other.testCases; + return *this; + } + + Counts assertions; + Counts testCases; + }; +} + +#include + +namespace Catch { + + struct SectionInfo { + SectionInfo + ( SourceLineInfo 
const& _lineInfo, + std::string const& _name, + std::string const& _description = std::string() ); + + std::string name; + std::string description; + SourceLineInfo lineInfo; + }; + + struct SectionEndInfo { + SectionEndInfo( SectionInfo const& _sectionInfo, Counts const& _prevAssertions, double _durationInSeconds ) + : sectionInfo( _sectionInfo ), prevAssertions( _prevAssertions ), durationInSeconds( _durationInSeconds ) + {} + + SectionInfo sectionInfo; + Counts prevAssertions; + double durationInSeconds; + }; + +} // end namespace Catch + +// #included from: catch_timer.h +#define TWOBLUECUBES_CATCH_TIMER_H_INCLUDED + +#ifdef _MSC_VER + +namespace Catch { + typedef unsigned long long UInt64; +} +#else +#include +namespace Catch { + typedef uint64_t UInt64; +} +#endif + +namespace Catch { + class Timer { + public: + Timer() : m_ticks( 0 ) {} + void start(); + unsigned int getElapsedMicroseconds() const; + unsigned int getElapsedMilliseconds() const; + double getElapsedSeconds() const; + + private: + UInt64 m_ticks; + }; + +} // namespace Catch + +#include + +namespace Catch { + + class Section : NonCopyable { + public: + Section( SectionInfo const& info ); + ~Section(); + + // This indicates whether the section should be executed or not + operator bool() const; + + private: + SectionInfo m_info; + + std::string m_name; + Counts m_assertions; + bool m_sectionIncluded; + Timer m_timer; + }; + +} // end namespace Catch + +#ifdef CATCH_CONFIG_VARIADIC_MACROS + #define INTERNAL_CATCH_SECTION( ... 
) \ + if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, __VA_ARGS__ ) ) +#else + #define INTERNAL_CATCH_SECTION( name, desc ) \ + if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, name, desc ) ) +#endif + +// #included from: internal/catch_generators.hpp +#define TWOBLUECUBES_CATCH_GENERATORS_HPP_INCLUDED + +#include +#include +#include + +namespace Catch { + +template +struct IGenerator { + virtual ~IGenerator() {} + virtual T getValue( std::size_t index ) const = 0; + virtual std::size_t size () const = 0; +}; + +template +class BetweenGenerator : public IGenerator { +public: + BetweenGenerator( T from, T to ) : m_from( from ), m_to( to ){} + + virtual T getValue( std::size_t index ) const { + return m_from+static_cast( index ); + } + + virtual std::size_t size() const { + return static_cast( 1+m_to-m_from ); + } + +private: + + T m_from; + T m_to; +}; + +template +class ValuesGenerator : public IGenerator { +public: + ValuesGenerator(){} + + void add( T value ) { + m_values.push_back( value ); + } + + virtual T getValue( std::size_t index ) const { + return m_values[index]; + } + + virtual std::size_t size() const { + return m_values.size(); + } + +private: + std::vector m_values; +}; + +template +class CompositeGenerator { +public: + CompositeGenerator() : m_totalSize( 0 ) {} + + // *** Move semantics, similar to auto_ptr *** + CompositeGenerator( CompositeGenerator& other ) + : m_fileInfo( other.m_fileInfo ), + m_totalSize( 0 ) + { + move( other ); + } + + CompositeGenerator& setFileInfo( const char* fileInfo ) { + m_fileInfo = fileInfo; + return *this; + } + + ~CompositeGenerator() { + deleteAll( m_composed ); + } + + operator T () const { + size_t overallIndex = getCurrentContext().getGeneratorIndex( m_fileInfo, m_totalSize ); + + typename std::vector*>::const_iterator it = m_composed.begin(); + typename 
std::vector*>::const_iterator itEnd = m_composed.end(); + for( size_t index = 0; it != itEnd; ++it ) + { + const IGenerator* generator = *it; + if( overallIndex >= index && overallIndex < index + generator->size() ) + { + return generator->getValue( overallIndex-index ); + } + index += generator->size(); + } + CATCH_INTERNAL_ERROR( "Indexed past end of generated range" ); + return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so + } + + void add( const IGenerator* generator ) { + m_totalSize += generator->size(); + m_composed.push_back( generator ); + } + + CompositeGenerator& then( CompositeGenerator& other ) { + move( other ); + return *this; + } + + CompositeGenerator& then( T value ) { + ValuesGenerator* valuesGen = new ValuesGenerator(); + valuesGen->add( value ); + add( valuesGen ); + return *this; + } + +private: + + void move( CompositeGenerator& other ) { + m_composed.insert( m_composed.end(), other.m_composed.begin(), other.m_composed.end() ); + m_totalSize += other.m_totalSize; + other.m_composed.clear(); + } + + std::vector*> m_composed; + std::string m_fileInfo; + size_t m_totalSize; +}; + +namespace Generators +{ + template + CompositeGenerator between( T from, T to ) { + CompositeGenerator generators; + generators.add( new BetweenGenerator( from, to ) ); + return generators; + } + + template + CompositeGenerator values( T val1, T val2 ) { + CompositeGenerator generators; + ValuesGenerator* valuesGen = new ValuesGenerator(); + valuesGen->add( val1 ); + valuesGen->add( val2 ); + generators.add( valuesGen ); + return generators; + } + + template + CompositeGenerator values( T val1, T val2, T val3 ){ + CompositeGenerator generators; + ValuesGenerator* valuesGen = new ValuesGenerator(); + valuesGen->add( val1 ); + valuesGen->add( val2 ); + valuesGen->add( val3 ); + generators.add( valuesGen ); + return generators; + } + + template + CompositeGenerator values( T val1, T 
val2, T val3, T val4 ) { + CompositeGenerator generators; + ValuesGenerator* valuesGen = new ValuesGenerator(); + valuesGen->add( val1 ); + valuesGen->add( val2 ); + valuesGen->add( val3 ); + valuesGen->add( val4 ); + generators.add( valuesGen ); + return generators; + } + +} // end namespace Generators + +using namespace Generators; + +} // end namespace Catch + +#define INTERNAL_CATCH_LINESTR2( line ) #line +#define INTERNAL_CATCH_LINESTR( line ) INTERNAL_CATCH_LINESTR2( line ) + +#define INTERNAL_CATCH_GENERATE( expr ) expr.setFileInfo( __FILE__ "(" INTERNAL_CATCH_LINESTR( __LINE__ ) ")" ) + +// #included from: internal/catch_interfaces_exception.h +#define TWOBLUECUBES_CATCH_INTERFACES_EXCEPTION_H_INCLUDED + +#include +#include + +// #included from: catch_interfaces_registry_hub.h +#define TWOBLUECUBES_CATCH_INTERFACES_REGISTRY_HUB_H_INCLUDED + +#include + +namespace Catch { + + class TestCase; + struct ITestCaseRegistry; + struct IExceptionTranslatorRegistry; + struct IExceptionTranslator; + struct IReporterRegistry; + struct IReporterFactory; + struct ITagAliasRegistry; + + struct IRegistryHub { + virtual ~IRegistryHub(); + + virtual IReporterRegistry const& getReporterRegistry() const = 0; + virtual ITestCaseRegistry const& getTestCaseRegistry() const = 0; + virtual ITagAliasRegistry const& getTagAliasRegistry() const = 0; + + virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() = 0; + }; + + struct IMutableRegistryHub { + virtual ~IMutableRegistryHub(); + virtual void registerReporter( std::string const& name, Ptr const& factory ) = 0; + virtual void registerListener( Ptr const& factory ) = 0; + virtual void registerTest( TestCase const& testInfo ) = 0; + virtual void registerTranslator( const IExceptionTranslator* translator ) = 0; + virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) = 0; + }; + + IRegistryHub& getRegistryHub(); + IMutableRegistryHub& getMutableRegistryHub(); 
+ void cleanUp(); + std::string translateActiveException(); + +} + +namespace Catch { + + typedef std::string(*exceptionTranslateFunction)(); + + struct IExceptionTranslator; + typedef std::vector ExceptionTranslators; + + struct IExceptionTranslator { + virtual ~IExceptionTranslator(); + virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const = 0; + }; + + struct IExceptionTranslatorRegistry { + virtual ~IExceptionTranslatorRegistry(); + + virtual std::string translateActiveException() const = 0; + }; + + class ExceptionTranslatorRegistrar { + template + class ExceptionTranslator : public IExceptionTranslator { + public: + + ExceptionTranslator( std::string(*translateFunction)( T& ) ) + : m_translateFunction( translateFunction ) + {} + + virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const CATCH_OVERRIDE { + try { + if( it == itEnd ) + throw; + else + return (*it)->translate( it+1, itEnd ); + } + catch( T& ex ) { + return m_translateFunction( ex ); + } + } + + protected: + std::string(*m_translateFunction)( T& ); + }; + + public: + template + ExceptionTranslatorRegistrar( std::string(*translateFunction)( T& ) ) { + getMutableRegistryHub().registerTranslator + ( new ExceptionTranslator( translateFunction ) ); + } + }; +} + +/////////////////////////////////////////////////////////////////////////////// +#define INTERNAL_CATCH_TRANSLATE_EXCEPTION2( translatorName, signature ) \ + static std::string translatorName( signature ); \ + namespace{ Catch::ExceptionTranslatorRegistrar INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionRegistrar )( &translatorName ); }\ + static std::string translatorName( signature ) + +#define INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION2( INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator ), signature ) + +// #included from: internal/catch_approx.hpp 
+#define TWOBLUECUBES_CATCH_APPROX_HPP_INCLUDED + +#include +#include + +#if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) +#include +#endif + +namespace Catch { +namespace Detail { + + class Approx { + public: + explicit Approx ( double value ) + : m_epsilon( std::numeric_limits::epsilon()*100 ), + m_margin( 0.0 ), + m_scale( 1.0 ), + m_value( value ) + {} + + Approx( Approx const& other ) + : m_epsilon( other.m_epsilon ), + m_margin( other.m_margin ), + m_scale( other.m_scale ), + m_value( other.m_value ) + {} + + static Approx custom() { + return Approx( 0 ); + } + +#if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) + + template ::value>::type> + Approx operator()( T value ) { + Approx approx( static_cast(value) ); + approx.epsilon( m_epsilon ); + approx.margin( m_margin ); + approx.scale( m_scale ); + return approx; + } + + template ::value>::type> + explicit Approx( T value ): Approx(static_cast(value)) + {} + + template ::value>::type> + friend bool operator == ( const T& lhs, Approx const& rhs ) { + // Thanks to Richard Harris for his help refining this formula + auto lhs_v = double(lhs); + bool relativeOK = std::fabs(lhs_v - rhs.m_value) < rhs.m_epsilon * (rhs.m_scale + (std::max)(std::fabs(lhs_v), std::fabs(rhs.m_value))); + if (relativeOK) { + return true; + } + return std::fabs(lhs_v - rhs.m_value) < rhs.m_margin; + } + + template ::value>::type> + friend bool operator == ( Approx const& lhs, const T& rhs ) { + return operator==( rhs, lhs ); + } + + template ::value>::type> + friend bool operator != ( T lhs, Approx const& rhs ) { + return !operator==( lhs, rhs ); + } + + template ::value>::type> + friend bool operator != ( Approx const& lhs, T rhs ) { + return !operator==( rhs, lhs ); + } + + template ::value>::type> + friend bool operator <= ( T lhs, Approx const& rhs ) { + return double(lhs) < rhs.m_value || lhs == rhs; + } + + template ::value>::type> + friend bool operator <= ( Approx const& lhs, T rhs ) { + return lhs.m_value < double(rhs) || lhs == rhs; + } + + 
template ::value>::type> + friend bool operator >= ( T lhs, Approx const& rhs ) { + return double(lhs) > rhs.m_value || lhs == rhs; + } + + template ::value>::type> + friend bool operator >= ( Approx const& lhs, T rhs ) { + return lhs.m_value > double(rhs) || lhs == rhs; + } + + template ::value>::type> + Approx& epsilon( T newEpsilon ) { + m_epsilon = double(newEpsilon); + return *this; + } + + template ::value>::type> + Approx& margin( T newMargin ) { + m_margin = double(newMargin); + return *this; + } + + template ::value>::type> + Approx& scale( T newScale ) { + m_scale = double(newScale); + return *this; + } + +#else + + Approx operator()( double value ) { + Approx approx( value ); + approx.epsilon( m_epsilon ); + approx.margin( m_margin ); + approx.scale( m_scale ); + return approx; + } + + friend bool operator == ( double lhs, Approx const& rhs ) { + // Thanks to Richard Harris for his help refining this formula + bool relativeOK = std::fabs( lhs - rhs.m_value ) < rhs.m_epsilon * (rhs.m_scale + (std::max)( std::fabs(lhs), std::fabs(rhs.m_value) ) ); + if (relativeOK) { + return true; + } + return std::fabs(lhs - rhs.m_value) < rhs.m_margin; + } + + friend bool operator == ( Approx const& lhs, double rhs ) { + return operator==( rhs, lhs ); + } + + friend bool operator != ( double lhs, Approx const& rhs ) { + return !operator==( lhs, rhs ); + } + + friend bool operator != ( Approx const& lhs, double rhs ) { + return !operator==( rhs, lhs ); + } + + friend bool operator <= ( double lhs, Approx const& rhs ) { + return lhs < rhs.m_value || lhs == rhs; + } + + friend bool operator <= ( Approx const& lhs, double rhs ) { + return lhs.m_value < rhs || lhs == rhs; + } + + friend bool operator >= ( double lhs, Approx const& rhs ) { + return lhs > rhs.m_value || lhs == rhs; + } + + friend bool operator >= ( Approx const& lhs, double rhs ) { + return lhs.m_value > rhs || lhs == rhs; + } + + Approx& epsilon( double newEpsilon ) { + m_epsilon = newEpsilon; + return *this; 
+ } + + Approx& margin( double newMargin ) { + m_margin = newMargin; + return *this; + } + + Approx& scale( double newScale ) { + m_scale = newScale; + return *this; + } +#endif + + std::string toString() const { + std::ostringstream oss; + oss << "Approx( " << Catch::toString( m_value ) << " )"; + return oss.str(); + } + + private: + double m_epsilon; + double m_margin; + double m_scale; + double m_value; + }; +} + +template<> +inline std::string toString( Detail::Approx const& value ) { + return value.toString(); +} + +} // end namespace Catch + +// #included from: internal/catch_matchers_string.h +#define TWOBLUECUBES_CATCH_MATCHERS_STRING_H_INCLUDED + +namespace Catch { +namespace Matchers { + + namespace StdString { + + struct CasedString + { + CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ); + std::string adjustString( std::string const& str ) const; + std::string caseSensitivitySuffix() const; + + CaseSensitive::Choice m_caseSensitivity; + std::string m_str; + }; + + struct StringMatcherBase : MatcherBase { + StringMatcherBase( std::string const& operation, CasedString const& comparator ); + virtual std::string describe() const CATCH_OVERRIDE; + + CasedString m_comparator; + std::string m_operation; + }; + + struct EqualsMatcher : StringMatcherBase { + EqualsMatcher( CasedString const& comparator ); + virtual bool match( std::string const& source ) const CATCH_OVERRIDE; + }; + struct ContainsMatcher : StringMatcherBase { + ContainsMatcher( CasedString const& comparator ); + virtual bool match( std::string const& source ) const CATCH_OVERRIDE; + }; + struct StartsWithMatcher : StringMatcherBase { + StartsWithMatcher( CasedString const& comparator ); + virtual bool match( std::string const& source ) const CATCH_OVERRIDE; + }; + struct EndsWithMatcher : StringMatcherBase { + EndsWithMatcher( CasedString const& comparator ); + virtual bool match( std::string const& source ) const CATCH_OVERRIDE; + }; + + } // namespace StdString + + 
// The following functions create the actual matcher objects. + // This allows the types to be inferred + + StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); + StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); + StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); + StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); + +} // namespace Matchers +} // namespace Catch + +// #included from: internal/catch_matchers_vector.h +#define TWOBLUECUBES_CATCH_MATCHERS_VECTOR_H_INCLUDED + +namespace Catch { +namespace Matchers { + + namespace Vector { + + template + struct ContainsElementMatcher : MatcherBase, T> { + + ContainsElementMatcher(T const &comparator) : m_comparator( comparator) {} + + bool match(std::vector const &v) const CATCH_OVERRIDE { + return std::find(v.begin(), v.end(), m_comparator) != v.end(); + } + + virtual std::string describe() const CATCH_OVERRIDE { + return "Contains: " + Catch::toString( m_comparator ); + } + + T const& m_comparator; + }; + + template + struct ContainsMatcher : MatcherBase, std::vector > { + + ContainsMatcher(std::vector const &comparator) : m_comparator( comparator ) {} + + bool match(std::vector const &v) const CATCH_OVERRIDE { + // !TBD: see note in EqualsMatcher + if (m_comparator.size() > v.size()) + return false; + for (size_t i = 0; i < m_comparator.size(); ++i) + if (std::find(v.begin(), v.end(), m_comparator[i]) == v.end()) + return false; + return true; + } + virtual std::string describe() const CATCH_OVERRIDE { + return "Contains: " + Catch::toString( m_comparator ); + } + + std::vector const& m_comparator; + }; + + template + struct EqualsMatcher : MatcherBase, std::vector > { + + EqualsMatcher(std::vector const &comparator) : m_comparator( 
comparator ) {} + + bool match(std::vector const &v) const CATCH_OVERRIDE { + // !TBD: This currently works if all elements can be compared using != + // - a more general approach would be via a compare template that defaults + // to using !=. but could be specialised for, e.g. std::vector etc + // - then just call that directly + if (m_comparator.size() != v.size()) + return false; + for (size_t i = 0; i < v.size(); ++i) + if (m_comparator[i] != v[i]) + return false; + return true; + } + virtual std::string describe() const CATCH_OVERRIDE { + return "Equals: " + Catch::toString( m_comparator ); + } + std::vector const& m_comparator; + }; + + } // namespace Vector + + // The following functions create the actual matcher objects. + // This allows the types to be inferred + + template + Vector::ContainsMatcher Contains( std::vector const& comparator ) { + return Vector::ContainsMatcher( comparator ); + } + + template + Vector::ContainsElementMatcher VectorContains( T const& comparator ) { + return Vector::ContainsElementMatcher( comparator ); + } + + template + Vector::EqualsMatcher Equals( std::vector const& comparator ) { + return Vector::EqualsMatcher( comparator ); + } + +} // namespace Matchers +} // namespace Catch + +// #included from: internal/catch_interfaces_tag_alias_registry.h +#define TWOBLUECUBES_CATCH_INTERFACES_TAG_ALIAS_REGISTRY_H_INCLUDED + +// #included from: catch_tag_alias.h +#define TWOBLUECUBES_CATCH_TAG_ALIAS_H_INCLUDED + +#include + +namespace Catch { + + struct TagAlias { + TagAlias( std::string const& _tag, SourceLineInfo _lineInfo ) : tag( _tag ), lineInfo( _lineInfo ) {} + + std::string tag; + SourceLineInfo lineInfo; + }; + + struct RegistrarForTagAliases { + RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ); + }; + +} // end namespace Catch + +#define CATCH_REGISTER_TAG_ALIAS( alias, spec ) namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, 
CATCH_INTERNAL_LINEINFO ); } +// #included from: catch_option.hpp +#define TWOBLUECUBES_CATCH_OPTION_HPP_INCLUDED + +namespace Catch { + + // An optional type + template + class Option { + public: + Option() : nullableValue( CATCH_NULL ) {} + Option( T const& _value ) + : nullableValue( new( storage ) T( _value ) ) + {} + Option( Option const& _other ) + : nullableValue( _other ? new( storage ) T( *_other ) : CATCH_NULL ) + {} + + ~Option() { + reset(); + } + + Option& operator= ( Option const& _other ) { + if( &_other != this ) { + reset(); + if( _other ) + nullableValue = new( storage ) T( *_other ); + } + return *this; + } + Option& operator = ( T const& _value ) { + reset(); + nullableValue = new( storage ) T( _value ); + return *this; + } + + void reset() { + if( nullableValue ) + nullableValue->~T(); + nullableValue = CATCH_NULL; + } + + T& operator*() { return *nullableValue; } + T const& operator*() const { return *nullableValue; } + T* operator->() { return nullableValue; } + const T* operator->() const { return nullableValue; } + + T valueOr( T const& defaultValue ) const { + return nullableValue ? 
*nullableValue : defaultValue; + } + + bool some() const { return nullableValue != CATCH_NULL; } + bool none() const { return nullableValue == CATCH_NULL; } + + bool operator !() const { return nullableValue == CATCH_NULL; } + operator SafeBool::type() const { + return SafeBool::makeSafe( some() ); + } + + private: + T *nullableValue; + union { + char storage[sizeof(T)]; + + // These are here to force alignment for the storage + long double dummy1; + void (*dummy2)(); + long double dummy3; +#ifdef CATCH_CONFIG_CPP11_LONG_LONG + long long dummy4; +#endif + }; + }; + +} // end namespace Catch + +namespace Catch { + + struct ITagAliasRegistry { + virtual ~ITagAliasRegistry(); + virtual Option find( std::string const& alias ) const = 0; + virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const = 0; + + static ITagAliasRegistry const& get(); + }; + +} // end namespace Catch + +// These files are included here so the single_include script doesn't put them +// in the conditionally compiled sections +// #included from: internal/catch_test_case_info.h +#define TWOBLUECUBES_CATCH_TEST_CASE_INFO_H_INCLUDED + +#include +#include + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpadded" +#endif + +namespace Catch { + + struct ITestCase; + + struct TestCaseInfo { + enum SpecialProperties{ + None = 0, + IsHidden = 1 << 1, + ShouldFail = 1 << 2, + MayFail = 1 << 3, + Throws = 1 << 4, + NonPortable = 1 << 5 + }; + + TestCaseInfo( std::string const& _name, + std::string const& _className, + std::string const& _description, + std::set const& _tags, + SourceLineInfo const& _lineInfo ); + + TestCaseInfo( TestCaseInfo const& other ); + + friend void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ); + + bool isHidden() const; + bool throws() const; + bool okToFail() const; + bool expectedToFail() const; + + std::string name; + std::string className; + std::string description; + std::set tags; + std::set lcaseTags; 
+ std::string tagsAsString; + SourceLineInfo lineInfo; + SpecialProperties properties; + }; + + class TestCase : public TestCaseInfo { + public: + + TestCase( ITestCase* testCase, TestCaseInfo const& info ); + TestCase( TestCase const& other ); + + TestCase withName( std::string const& _newName ) const; + + void invoke() const; + + TestCaseInfo const& getTestCaseInfo() const; + + void swap( TestCase& other ); + bool operator == ( TestCase const& other ) const; + bool operator < ( TestCase const& other ) const; + TestCase& operator = ( TestCase const& other ); + + private: + Ptr test; + }; + + TestCase makeTestCase( ITestCase* testCase, + std::string const& className, + std::string const& name, + std::string const& description, + SourceLineInfo const& lineInfo ); +} + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + + +#ifdef __OBJC__ +// #included from: internal/catch_objc.hpp +#define TWOBLUECUBES_CATCH_OBJC_HPP_INCLUDED + +#import + +#include + +// NB. Any general catch headers included here must be included +// in catch.hpp first to make sure they are included by the single +// header for non obj-usage + +/////////////////////////////////////////////////////////////////////////////// +// This protocol is really only here for (self) documenting purposes, since +// all its methods are optional. 
+@protocol OcFixture + +@optional + +-(void) setUp; +-(void) tearDown; + +@end + +namespace Catch { + + class OcMethod : public SharedImpl { + + public: + OcMethod( Class cls, SEL sel ) : m_cls( cls ), m_sel( sel ) {} + + virtual void invoke() const { + id obj = [[m_cls alloc] init]; + + performOptionalSelector( obj, @selector(setUp) ); + performOptionalSelector( obj, m_sel ); + performOptionalSelector( obj, @selector(tearDown) ); + + arcSafeRelease( obj ); + } + private: + virtual ~OcMethod() {} + + Class m_cls; + SEL m_sel; + }; + + namespace Detail{ + + inline std::string getAnnotation( Class cls, + std::string const& annotationName, + std::string const& testCaseName ) { + NSString* selStr = [[NSString alloc] initWithFormat:@"Catch_%s_%s", annotationName.c_str(), testCaseName.c_str()]; + SEL sel = NSSelectorFromString( selStr ); + arcSafeRelease( selStr ); + id value = performOptionalSelector( cls, sel ); + if( value ) + return [(NSString*)value UTF8String]; + return ""; + } + } + + inline size_t registerTestMethods() { + size_t noTestMethods = 0; + int noClasses = objc_getClassList( CATCH_NULL, 0 ); + + Class* classes = (CATCH_UNSAFE_UNRETAINED Class *)malloc( sizeof(Class) * noClasses); + objc_getClassList( classes, noClasses ); + + for( int c = 0; c < noClasses; c++ ) { + Class cls = classes[c]; + { + u_int count; + Method* methods = class_copyMethodList( cls, &count ); + for( u_int m = 0; m < count ; m++ ) { + SEL selector = method_getName(methods[m]); + std::string methodName = sel_getName(selector); + if( startsWith( methodName, "Catch_TestCase_" ) ) { + std::string testCaseName = methodName.substr( 15 ); + std::string name = Detail::getAnnotation( cls, "Name", testCaseName ); + std::string desc = Detail::getAnnotation( cls, "Description", testCaseName ); + const char* className = class_getName( cls ); + + getMutableRegistryHub().registerTest( makeTestCase( new OcMethod( cls, selector ), className, name.c_str(), desc.c_str(), SourceLineInfo() ) ); + 
noTestMethods++; + } + } + free(methods); + } + } + return noTestMethods; + } + + namespace Matchers { + namespace Impl { + namespace NSStringMatchers { + + struct StringHolder : MatcherBase{ + StringHolder( NSString* substr ) : m_substr( [substr copy] ){} + StringHolder( StringHolder const& other ) : m_substr( [other.m_substr copy] ){} + StringHolder() { + arcSafeRelease( m_substr ); + } + + virtual bool match( NSString* arg ) const CATCH_OVERRIDE { + return false; + } + + NSString* m_substr; + }; + + struct Equals : StringHolder { + Equals( NSString* substr ) : StringHolder( substr ){} + + virtual bool match( NSString* str ) const CATCH_OVERRIDE { + return (str != nil || m_substr == nil ) && + [str isEqualToString:m_substr]; + } + + virtual std::string describe() const CATCH_OVERRIDE { + return "equals string: " + Catch::toString( m_substr ); + } + }; + + struct Contains : StringHolder { + Contains( NSString* substr ) : StringHolder( substr ){} + + virtual bool match( NSString* str ) const { + return (str != nil || m_substr == nil ) && + [str rangeOfString:m_substr].location != NSNotFound; + } + + virtual std::string describe() const CATCH_OVERRIDE { + return "contains string: " + Catch::toString( m_substr ); + } + }; + + struct StartsWith : StringHolder { + StartsWith( NSString* substr ) : StringHolder( substr ){} + + virtual bool match( NSString* str ) const { + return (str != nil || m_substr == nil ) && + [str rangeOfString:m_substr].location == 0; + } + + virtual std::string describe() const CATCH_OVERRIDE { + return "starts with: " + Catch::toString( m_substr ); + } + }; + struct EndsWith : StringHolder { + EndsWith( NSString* substr ) : StringHolder( substr ){} + + virtual bool match( NSString* str ) const { + return (str != nil || m_substr == nil ) && + [str rangeOfString:m_substr].location == [str length] - [m_substr length]; + } + + virtual std::string describe() const CATCH_OVERRIDE { + return "ends with: " + Catch::toString( m_substr ); + } + }; + + } 
// namespace NSStringMatchers + } // namespace Impl + + inline Impl::NSStringMatchers::Equals + Equals( NSString* substr ){ return Impl::NSStringMatchers::Equals( substr ); } + + inline Impl::NSStringMatchers::Contains + Contains( NSString* substr ){ return Impl::NSStringMatchers::Contains( substr ); } + + inline Impl::NSStringMatchers::StartsWith + StartsWith( NSString* substr ){ return Impl::NSStringMatchers::StartsWith( substr ); } + + inline Impl::NSStringMatchers::EndsWith + EndsWith( NSString* substr ){ return Impl::NSStringMatchers::EndsWith( substr ); } + + } // namespace Matchers + + using namespace Matchers; + +} // namespace Catch + +/////////////////////////////////////////////////////////////////////////////// +#define OC_TEST_CASE( name, desc )\ ++(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Name_test ) \ +{\ +return @ name; \ +}\ ++(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Description_test ) \ +{ \ +return @ desc; \ +} \ +-(void) INTERNAL_CATCH_UNIQUE_NAME( Catch_TestCase_test ) + +#endif + +#ifdef CATCH_IMPL + +// !TBD: Move the leak detector code into a separate header +#ifdef CATCH_CONFIG_WINDOWS_CRTDBG +#include +class LeakDetector { +public: + LeakDetector() { + int flag = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG); + flag |= _CRTDBG_LEAK_CHECK_DF; + flag |= _CRTDBG_ALLOC_MEM_DF; + _CrtSetDbgFlag(flag); + _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); + _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); + // Change this to leaking allocation's number to break there + _CrtSetBreakAlloc(-1); + } +}; +#else +class LeakDetector {}; +#endif + +LeakDetector leakDetector; + +// #included from: internal/catch_impl.hpp +#define TWOBLUECUBES_CATCH_IMPL_HPP_INCLUDED + +// Collect all the implementation files together here +// These are the equivalent of what would usually be cpp files + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wweak-vtables" +#endif + +// #included from: ../catch_session.hpp 
+#define TWOBLUECUBES_CATCH_RUNNER_HPP_INCLUDED + +// #included from: internal/catch_commandline.hpp +#define TWOBLUECUBES_CATCH_COMMANDLINE_HPP_INCLUDED + +// #included from: catch_config.hpp +#define TWOBLUECUBES_CATCH_CONFIG_HPP_INCLUDED + +// #included from: catch_test_spec_parser.hpp +#define TWOBLUECUBES_CATCH_TEST_SPEC_PARSER_HPP_INCLUDED + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpadded" +#endif + +// #included from: catch_test_spec.hpp +#define TWOBLUECUBES_CATCH_TEST_SPEC_HPP_INCLUDED + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpadded" +#endif + +// #included from: catch_wildcard_pattern.hpp +#define TWOBLUECUBES_CATCH_WILDCARD_PATTERN_HPP_INCLUDED + +#include + +namespace Catch +{ + class WildcardPattern { + enum WildcardPosition { + NoWildcard = 0, + WildcardAtStart = 1, + WildcardAtEnd = 2, + WildcardAtBothEnds = WildcardAtStart | WildcardAtEnd + }; + + public: + + WildcardPattern( std::string const& pattern, CaseSensitive::Choice caseSensitivity ) + : m_caseSensitivity( caseSensitivity ), + m_wildcard( NoWildcard ), + m_pattern( adjustCase( pattern ) ) + { + if( startsWith( m_pattern, '*' ) ) { + m_pattern = m_pattern.substr( 1 ); + m_wildcard = WildcardAtStart; + } + if( endsWith( m_pattern, '*' ) ) { + m_pattern = m_pattern.substr( 0, m_pattern.size()-1 ); + m_wildcard = static_cast( m_wildcard | WildcardAtEnd ); + } + } + virtual ~WildcardPattern(); + virtual bool matches( std::string const& str ) const { + switch( m_wildcard ) { + case NoWildcard: + return m_pattern == adjustCase( str ); + case WildcardAtStart: + return endsWith( adjustCase( str ), m_pattern ); + case WildcardAtEnd: + return startsWith( adjustCase( str ), m_pattern ); + case WildcardAtBothEnds: + return contains( adjustCase( str ), m_pattern ); + } + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" +#endif + throw std::logic_error( 
"Unknown enum" ); +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + } + private: + std::string adjustCase( std::string const& str ) const { + return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str; + } + CaseSensitive::Choice m_caseSensitivity; + WildcardPosition m_wildcard; + std::string m_pattern; + }; +} + +#include +#include + +namespace Catch { + + class TestSpec { + struct Pattern : SharedImpl<> { + virtual ~Pattern(); + virtual bool matches( TestCaseInfo const& testCase ) const = 0; + }; + class NamePattern : public Pattern { + public: + NamePattern( std::string const& name ) + : m_wildcardPattern( toLower( name ), CaseSensitive::No ) + {} + virtual ~NamePattern(); + virtual bool matches( TestCaseInfo const& testCase ) const { + return m_wildcardPattern.matches( toLower( testCase.name ) ); + } + private: + WildcardPattern m_wildcardPattern; + }; + + class TagPattern : public Pattern { + public: + TagPattern( std::string const& tag ) : m_tag( toLower( tag ) ) {} + virtual ~TagPattern(); + virtual bool matches( TestCaseInfo const& testCase ) const { + return testCase.lcaseTags.find( m_tag ) != testCase.lcaseTags.end(); + } + private: + std::string m_tag; + }; + + class ExcludedPattern : public Pattern { + public: + ExcludedPattern( Ptr const& underlyingPattern ) : m_underlyingPattern( underlyingPattern ) {} + virtual ~ExcludedPattern(); + virtual bool matches( TestCaseInfo const& testCase ) const { return !m_underlyingPattern->matches( testCase ); } + private: + Ptr m_underlyingPattern; + }; + + struct Filter { + std::vector > m_patterns; + + bool matches( TestCaseInfo const& testCase ) const { + // All patterns in a filter must match for the filter to be a match + for( std::vector >::const_iterator it = m_patterns.begin(), itEnd = m_patterns.end(); it != itEnd; ++it ) { + if( !(*it)->matches( testCase ) ) + return false; + } + return true; + } + }; + + public: + bool hasFilters() const { + return !m_filters.empty(); + } + bool matches( 
TestCaseInfo const& testCase ) const { + // A TestSpec matches if any filter matches + for( std::vector::const_iterator it = m_filters.begin(), itEnd = m_filters.end(); it != itEnd; ++it ) + if( it->matches( testCase ) ) + return true; + return false; + } + + private: + std::vector m_filters; + + friend class TestSpecParser; + }; +} + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +namespace Catch { + + class TestSpecParser { + enum Mode{ None, Name, QuotedName, Tag, EscapedName }; + Mode m_mode; + bool m_exclusion; + std::size_t m_start, m_pos; + std::string m_arg; + std::vector m_escapeChars; + TestSpec::Filter m_currentFilter; + TestSpec m_testSpec; + ITagAliasRegistry const* m_tagAliases; + + public: + TestSpecParser( ITagAliasRegistry const& tagAliases ) : m_tagAliases( &tagAliases ) {} + + TestSpecParser& parse( std::string const& arg ) { + m_mode = None; + m_exclusion = false; + m_start = std::string::npos; + m_arg = m_tagAliases->expandAliases( arg ); + m_escapeChars.clear(); + for( m_pos = 0; m_pos < m_arg.size(); ++m_pos ) + visitChar( m_arg[m_pos] ); + if( m_mode == Name ) + addPattern(); + return *this; + } + TestSpec testSpec() { + addFilter(); + return m_testSpec; + } + private: + void visitChar( char c ) { + if( m_mode == None ) { + switch( c ) { + case ' ': return; + case '~': m_exclusion = true; return; + case '[': return startNewMode( Tag, ++m_pos ); + case '"': return startNewMode( QuotedName, ++m_pos ); + case '\\': return escape(); + default: startNewMode( Name, m_pos ); break; + } + } + if( m_mode == Name ) { + if( c == ',' ) { + addPattern(); + addFilter(); + } + else if( c == '[' ) { + if( subString() == "exclude:" ) + m_exclusion = true; + else + addPattern(); + startNewMode( Tag, ++m_pos ); + } + else if( c == '\\' ) + escape(); + } + else if( m_mode == EscapedName ) + m_mode = Name; + else if( m_mode == QuotedName && c == '"' ) + addPattern(); + else if( m_mode == Tag && c == ']' ) + addPattern(); + } + void startNewMode( Mode 
mode, std::size_t start ) { + m_mode = mode; + m_start = start; + } + void escape() { + if( m_mode == None ) + m_start = m_pos; + m_mode = EscapedName; + m_escapeChars.push_back( m_pos ); + } + std::string subString() const { return m_arg.substr( m_start, m_pos - m_start ); } + template + void addPattern() { + std::string token = subString(); + for( size_t i = 0; i < m_escapeChars.size(); ++i ) + token = token.substr( 0, m_escapeChars[i]-m_start-i ) + token.substr( m_escapeChars[i]-m_start-i+1 ); + m_escapeChars.clear(); + if( startsWith( token, "exclude:" ) ) { + m_exclusion = true; + token = token.substr( 8 ); + } + if( !token.empty() ) { + Ptr pattern = new T( token ); + if( m_exclusion ) + pattern = new TestSpec::ExcludedPattern( pattern ); + m_currentFilter.m_patterns.push_back( pattern ); + } + m_exclusion = false; + m_mode = None; + } + void addFilter() { + if( !m_currentFilter.m_patterns.empty() ) { + m_testSpec.m_filters.push_back( m_currentFilter ); + m_currentFilter = TestSpec::Filter(); + } + } + }; + inline TestSpec parseTestSpec( std::string const& arg ) { + return TestSpecParser( ITagAliasRegistry::get() ).parse( arg ).testSpec(); + } + +} // namespace Catch + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +// #included from: catch_interfaces_config.h +#define TWOBLUECUBES_CATCH_INTERFACES_CONFIG_H_INCLUDED + +#include +#include +#include + +namespace Catch { + + struct Verbosity { enum Level { + NoOutput = 0, + Quiet, + Normal + }; }; + + struct WarnAbout { enum What { + Nothing = 0x00, + NoAssertions = 0x01 + }; }; + + struct ShowDurations { enum OrNot { + DefaultForReporter, + Always, + Never + }; }; + struct RunTests { enum InWhatOrder { + InDeclarationOrder, + InLexicographicalOrder, + InRandomOrder + }; }; + struct UseColour { enum YesOrNo { + Auto, + Yes, + No + }; }; + + class TestSpec; + + struct IConfig : IShared { + + virtual ~IConfig(); + + virtual bool allowThrows() const = 0; + virtual std::ostream& stream() const = 0; + 
virtual std::string name() const = 0; + virtual bool includeSuccessfulResults() const = 0; + virtual bool shouldDebugBreak() const = 0; + virtual bool warnAboutMissingAssertions() const = 0; + virtual int abortAfter() const = 0; + virtual bool showInvisibles() const = 0; + virtual ShowDurations::OrNot showDurations() const = 0; + virtual TestSpec const& testSpec() const = 0; + virtual RunTests::InWhatOrder runOrder() const = 0; + virtual unsigned int rngSeed() const = 0; + virtual UseColour::YesOrNo useColour() const = 0; + virtual std::vector const& getSectionsToRun() const = 0; + + }; +} + +// #included from: catch_stream.h +#define TWOBLUECUBES_CATCH_STREAM_H_INCLUDED + +// #included from: catch_streambuf.h +#define TWOBLUECUBES_CATCH_STREAMBUF_H_INCLUDED + +#include + +namespace Catch { + + class StreamBufBase : public std::streambuf { + public: + virtual ~StreamBufBase() CATCH_NOEXCEPT; + }; +} + +#include +#include +#include +#include + +namespace Catch { + + std::ostream& cout(); + std::ostream& cerr(); + + struct IStream { + virtual ~IStream() CATCH_NOEXCEPT; + virtual std::ostream& stream() const = 0; + }; + + class FileStream : public IStream { + mutable std::ofstream m_ofs; + public: + FileStream( std::string const& filename ); + virtual ~FileStream() CATCH_NOEXCEPT; + public: // IStream + virtual std::ostream& stream() const CATCH_OVERRIDE; + }; + + class CoutStream : public IStream { + mutable std::ostream m_os; + public: + CoutStream(); + virtual ~CoutStream() CATCH_NOEXCEPT; + + public: // IStream + virtual std::ostream& stream() const CATCH_OVERRIDE; + }; + + class DebugOutStream : public IStream { + CATCH_AUTO_PTR( StreamBufBase ) m_streamBuf; + mutable std::ostream m_os; + public: + DebugOutStream(); + virtual ~DebugOutStream() CATCH_NOEXCEPT; + + public: // IStream + virtual std::ostream& stream() const CATCH_OVERRIDE; + }; +} + +#include +#include +#include +#include + +#ifndef CATCH_CONFIG_CONSOLE_WIDTH +#define CATCH_CONFIG_CONSOLE_WIDTH 80 
+#endif + +namespace Catch { + + struct ConfigData { + + ConfigData() + : listTests( false ), + listTags( false ), + listReporters( false ), + listTestNamesOnly( false ), + showSuccessfulTests( false ), + shouldDebugBreak( false ), + noThrow( false ), + showHelp( false ), + showInvisibles( false ), + filenamesAsTags( false ), + abortAfter( -1 ), + rngSeed( 0 ), + verbosity( Verbosity::Normal ), + warnings( WarnAbout::Nothing ), + showDurations( ShowDurations::DefaultForReporter ), + runOrder( RunTests::InDeclarationOrder ), + useColour( UseColour::Auto ) + {} + + bool listTests; + bool listTags; + bool listReporters; + bool listTestNamesOnly; + + bool showSuccessfulTests; + bool shouldDebugBreak; + bool noThrow; + bool showHelp; + bool showInvisibles; + bool filenamesAsTags; + + int abortAfter; + unsigned int rngSeed; + + Verbosity::Level verbosity; + WarnAbout::What warnings; + ShowDurations::OrNot showDurations; + RunTests::InWhatOrder runOrder; + UseColour::YesOrNo useColour; + + std::string outputFilename; + std::string name; + std::string processName; + + std::vector reporterNames; + std::vector testsOrTags; + std::vector sectionsToRun; + }; + + class Config : public SharedImpl { + private: + Config( Config const& other ); + Config& operator = ( Config const& other ); + virtual void dummy(); + public: + + Config() + {} + + Config( ConfigData const& data ) + : m_data( data ), + m_stream( openStream() ) + { + if( !data.testsOrTags.empty() ) { + TestSpecParser parser( ITagAliasRegistry::get() ); + for( std::size_t i = 0; i < data.testsOrTags.size(); ++i ) + parser.parse( data.testsOrTags[i] ); + m_testSpec = parser.testSpec(); + } + } + + virtual ~Config() {} + + std::string const& getFilename() const { + return m_data.outputFilename ; + } + + bool listTests() const { return m_data.listTests; } + bool listTestNamesOnly() const { return m_data.listTestNamesOnly; } + bool listTags() const { return m_data.listTags; } + bool listReporters() const { return 
m_data.listReporters; } + + std::string getProcessName() const { return m_data.processName; } + + std::vector const& getReporterNames() const { return m_data.reporterNames; } + std::vector const& getSectionsToRun() const CATCH_OVERRIDE { return m_data.sectionsToRun; } + + virtual TestSpec const& testSpec() const CATCH_OVERRIDE { return m_testSpec; } + + bool showHelp() const { return m_data.showHelp; } + + // IConfig interface + virtual bool allowThrows() const CATCH_OVERRIDE { return !m_data.noThrow; } + virtual std::ostream& stream() const CATCH_OVERRIDE { return m_stream->stream(); } + virtual std::string name() const CATCH_OVERRIDE { return m_data.name.empty() ? m_data.processName : m_data.name; } + virtual bool includeSuccessfulResults() const CATCH_OVERRIDE { return m_data.showSuccessfulTests; } + virtual bool warnAboutMissingAssertions() const CATCH_OVERRIDE { return m_data.warnings & WarnAbout::NoAssertions; } + virtual ShowDurations::OrNot showDurations() const CATCH_OVERRIDE { return m_data.showDurations; } + virtual RunTests::InWhatOrder runOrder() const CATCH_OVERRIDE { return m_data.runOrder; } + virtual unsigned int rngSeed() const CATCH_OVERRIDE { return m_data.rngSeed; } + virtual UseColour::YesOrNo useColour() const CATCH_OVERRIDE { return m_data.useColour; } + virtual bool shouldDebugBreak() const CATCH_OVERRIDE { return m_data.shouldDebugBreak; } + virtual int abortAfter() const CATCH_OVERRIDE { return m_data.abortAfter; } + virtual bool showInvisibles() const CATCH_OVERRIDE { return m_data.showInvisibles; } + + private: + + IStream const* openStream() { + if( m_data.outputFilename.empty() ) + return new CoutStream(); + else if( m_data.outputFilename[0] == '%' ) { + if( m_data.outputFilename == "%debug" ) + return new DebugOutStream(); + else + throw std::domain_error( "Unrecognised stream: " + m_data.outputFilename ); + } + else + return new FileStream( m_data.outputFilename ); + } + ConfigData m_data; + + CATCH_AUTO_PTR( IStream const ) 
m_stream; + TestSpec m_testSpec; + }; + +} // end namespace Catch + +// #included from: catch_clara.h +#define TWOBLUECUBES_CATCH_CLARA_H_INCLUDED + +// Use Catch's value for console width (store Clara's off to the side, if present) +#ifdef CLARA_CONFIG_CONSOLE_WIDTH +#define CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH CLARA_CONFIG_CONSOLE_WIDTH +#undef CLARA_CONFIG_CONSOLE_WIDTH +#endif +#define CLARA_CONFIG_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH + +// Declare Clara inside the Catch namespace +#define STITCH_CLARA_OPEN_NAMESPACE namespace Catch { +// #included from: ../external/clara.h + +// Version 0.0.2.4 + +// Only use header guard if we are not using an outer namespace +#if !defined(TWOBLUECUBES_CLARA_H_INCLUDED) || defined(STITCH_CLARA_OPEN_NAMESPACE) + +#ifndef STITCH_CLARA_OPEN_NAMESPACE +#define TWOBLUECUBES_CLARA_H_INCLUDED +#define STITCH_CLARA_OPEN_NAMESPACE +#define STITCH_CLARA_CLOSE_NAMESPACE +#else +#define STITCH_CLARA_CLOSE_NAMESPACE } +#endif + +#define STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE + +// ----------- #included from tbc_text_format.h ----------- + +// Only use header guard if we are not using an outer namespace +#if !defined(TBC_TEXT_FORMAT_H_INCLUDED) || defined(STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE) +#ifndef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE +#define TBC_TEXT_FORMAT_H_INCLUDED +#endif + +#include +#include +#include +#include +#include + +// Use optional outer namespace +#ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE +namespace STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE { +#endif + +namespace Tbc { + +#ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH + const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH; +#else + const unsigned int consoleWidth = 80; +#endif + + struct TextAttributes { + TextAttributes() + : initialIndent( std::string::npos ), + indent( 0 ), + width( consoleWidth-1 ), + tabChar( '\t' ) + {} + + TextAttributes& setInitialIndent( std::size_t _value ) { initialIndent = _value; return *this; } + 
TextAttributes& setIndent( std::size_t _value ) { indent = _value; return *this; } + TextAttributes& setWidth( std::size_t _value ) { width = _value; return *this; } + TextAttributes& setTabChar( char _value ) { tabChar = _value; return *this; } + + std::size_t initialIndent; // indent of first line, or npos + std::size_t indent; // indent of subsequent lines, or all if initialIndent is npos + std::size_t width; // maximum width of text, including indent. Longer text will wrap + char tabChar; // If this char is seen the indent is changed to current pos + }; + + class Text { + public: + Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() ) + : attr( _attr ) + { + std::string wrappableChars = " [({.,/|\\-"; + std::size_t indent = _attr.initialIndent != std::string::npos + ? _attr.initialIndent + : _attr.indent; + std::string remainder = _str; + + while( !remainder.empty() ) { + if( lines.size() >= 1000 ) { + lines.push_back( "... message truncated due to excessive size" ); + return; + } + std::size_t tabPos = std::string::npos; + std::size_t width = (std::min)( remainder.size(), _attr.width - indent ); + std::size_t pos = remainder.find_first_of( '\n' ); + if( pos <= width ) { + width = pos; + } + pos = remainder.find_last_of( _attr.tabChar, width ); + if( pos != std::string::npos ) { + tabPos = pos; + if( remainder[width] == '\n' ) + width--; + remainder = remainder.substr( 0, tabPos ) + remainder.substr( tabPos+1 ); + } + + if( width == remainder.size() ) { + spliceLine( indent, remainder, width ); + } + else if( remainder[width] == '\n' ) { + spliceLine( indent, remainder, width ); + if( width <= 1 || remainder.size() != 1 ) + remainder = remainder.substr( 1 ); + indent = _attr.indent; + } + else { + pos = remainder.find_last_of( wrappableChars, width ); + if( pos != std::string::npos && pos > 0 ) { + spliceLine( indent, remainder, pos ); + if( remainder[0] == ' ' ) + remainder = remainder.substr( 1 ); + } + else { + spliceLine( indent, 
remainder, width-1 ); + lines.back() += "-"; + } + if( lines.size() == 1 ) + indent = _attr.indent; + if( tabPos != std::string::npos ) + indent += tabPos; + } + } + } + + void spliceLine( std::size_t _indent, std::string& _remainder, std::size_t _pos ) { + lines.push_back( std::string( _indent, ' ' ) + _remainder.substr( 0, _pos ) ); + _remainder = _remainder.substr( _pos ); + } + + typedef std::vector::const_iterator const_iterator; + + const_iterator begin() const { return lines.begin(); } + const_iterator end() const { return lines.end(); } + std::string const& last() const { return lines.back(); } + std::size_t size() const { return lines.size(); } + std::string const& operator[]( std::size_t _index ) const { return lines[_index]; } + std::string toString() const { + std::ostringstream oss; + oss << *this; + return oss.str(); + } + + inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) { + for( Text::const_iterator it = _text.begin(), itEnd = _text.end(); + it != itEnd; ++it ) { + if( it != _text.begin() ) + _stream << "\n"; + _stream << *it; + } + return _stream; + } + + private: + std::string str; + TextAttributes attr; + std::vector lines; + }; + +} // end namespace Tbc + +#ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE +} // end outer namespace +#endif + +#endif // TBC_TEXT_FORMAT_H_INCLUDED + +// ----------- end of #include from tbc_text_format.h ----------- +// ........... back in clara.h + +#undef STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE + +// ----------- #included from clara_compilers.h ----------- + +#ifndef TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED +#define TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED + +// Detect a number of compiler features - mostly C++11/14 conformance - by compiler +// The following features are defined: +// +// CLARA_CONFIG_CPP11_NULLPTR : is nullptr supported? +// CLARA_CONFIG_CPP11_NOEXCEPT : is noexcept supported? 
+// CLARA_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods +// CLARA_CONFIG_CPP11_OVERRIDE : is override supported? +// CLARA_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) + +// CLARA_CONFIG_CPP11_OR_GREATER : Is C++11 supported? + +// CLARA_CONFIG_VARIADIC_MACROS : are variadic macros supported? + +// In general each macro has a _NO_ form +// (e.g. CLARA_CONFIG_CPP11_NO_NULLPTR) which disables the feature. +// Many features, at point of detection, define an _INTERNAL_ macro, so they +// can be combined, en-mass, with the _NO_ forms later. + +// All the C++11 features can be disabled with CLARA_CONFIG_NO_CPP11 + +#ifdef __clang__ + +#if __has_feature(cxx_nullptr) +#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR +#endif + +#if __has_feature(cxx_noexcept) +#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT +#endif + +#endif // __clang__ + +//////////////////////////////////////////////////////////////////////////////// +// GCC +#ifdef __GNUC__ + +#if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) +#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR +#endif + +// - otherwise more recent versions define __cplusplus >= 201103L +// and will get picked up below + +#endif // __GNUC__ + +//////////////////////////////////////////////////////////////////////////////// +// Visual C++ +#ifdef _MSC_VER + +#if (_MSC_VER >= 1600) +#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR +#define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR +#endif + +#if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) +#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT +#define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS +#endif + +#endif // _MSC_VER + +//////////////////////////////////////////////////////////////////////////////// +// C++ language feature support + +// catch all support for C++11 +#if defined(__cplusplus) && __cplusplus >= 201103L + +#define CLARA_CPP11_OR_GREATER + +#if !defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) 
+#define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR +#endif + +#ifndef CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT +#define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT +#endif + +#ifndef CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS +#define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS +#endif + +#if !defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) +#define CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE +#endif +#if !defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) +#define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR +#endif + +#endif // __cplusplus >= 201103L + +// Now set the actual defines based on the above + anything the user has configured +#if defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NO_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_NO_CPP11) +#define CLARA_CONFIG_CPP11_NULLPTR +#endif +#if defined(CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_NO_CPP11) +#define CLARA_CONFIG_CPP11_NOEXCEPT +#endif +#if defined(CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_NO_CPP11) +#define CLARA_CONFIG_CPP11_GENERATED_METHODS +#endif +#if defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_OVERRIDE) && !defined(CLARA_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_CPP11) +#define CLARA_CONFIG_CPP11_OVERRIDE +#endif +#if defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_UNIQUE_PTR) && !defined(CLARA_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_CPP11) +#define CLARA_CONFIG_CPP11_UNIQUE_PTR +#endif + +// noexcept support: +#if defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_NOEXCEPT) +#define CLARA_NOEXCEPT noexcept +# define CLARA_NOEXCEPT_IS(x) noexcept(x) +#else +#define CLARA_NOEXCEPT throw() +# define CLARA_NOEXCEPT_IS(x) +#endif + +// nullptr support 
+#ifdef CLARA_CONFIG_CPP11_NULLPTR +#define CLARA_NULL nullptr +#else +#define CLARA_NULL NULL +#endif + +// override support +#ifdef CLARA_CONFIG_CPP11_OVERRIDE +#define CLARA_OVERRIDE override +#else +#define CLARA_OVERRIDE +#endif + +// unique_ptr support +#ifdef CLARA_CONFIG_CPP11_UNIQUE_PTR +# define CLARA_AUTO_PTR( T ) std::unique_ptr +#else +# define CLARA_AUTO_PTR( T ) std::auto_ptr +#endif + +#endif // TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED + +// ----------- end of #include from clara_compilers.h ----------- +// ........... back in clara.h + +#include +#include +#include + +#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) +#define CLARA_PLATFORM_WINDOWS +#endif + +// Use optional outer namespace +#ifdef STITCH_CLARA_OPEN_NAMESPACE +STITCH_CLARA_OPEN_NAMESPACE +#endif + +namespace Clara { + + struct UnpositionalTag {}; + + extern UnpositionalTag _; + +#ifdef CLARA_CONFIG_MAIN + UnpositionalTag _; +#endif + + namespace Detail { + +#ifdef CLARA_CONSOLE_WIDTH + const unsigned int consoleWidth = CLARA_CONFIG_CONSOLE_WIDTH; +#else + const unsigned int consoleWidth = 80; +#endif + + using namespace Tbc; + + inline bool startsWith( std::string const& str, std::string const& prefix ) { + return str.size() >= prefix.size() && str.substr( 0, prefix.size() ) == prefix; + } + + template struct RemoveConstRef{ typedef T type; }; + template struct RemoveConstRef{ typedef T type; }; + template struct RemoveConstRef{ typedef T type; }; + template struct RemoveConstRef{ typedef T type; }; + + template struct IsBool { static const bool value = false; }; + template<> struct IsBool { static const bool value = true; }; + + template + void convertInto( std::string const& _source, T& _dest ) { + std::stringstream ss; + ss << _source; + ss >> _dest; + if( ss.fail() ) + throw std::runtime_error( "Unable to convert " + _source + " to destination type" ); + } + inline void convertInto( std::string const& _source, std::string& _dest ) { + _dest = 
_source; + } + char toLowerCh(char c) { + return static_cast( std::tolower( c ) ); + } + inline void convertInto( std::string const& _source, bool& _dest ) { + std::string sourceLC = _source; + std::transform( sourceLC.begin(), sourceLC.end(), sourceLC.begin(), toLowerCh ); + if( sourceLC == "y" || sourceLC == "1" || sourceLC == "true" || sourceLC == "yes" || sourceLC == "on" ) + _dest = true; + else if( sourceLC == "n" || sourceLC == "0" || sourceLC == "false" || sourceLC == "no" || sourceLC == "off" ) + _dest = false; + else + throw std::runtime_error( "Expected a boolean value but did not recognise:\n '" + _source + "'" ); + } + + template + struct IArgFunction { + virtual ~IArgFunction() {} +#ifdef CLARA_CONFIG_CPP11_GENERATED_METHODS + IArgFunction() = default; + IArgFunction( IArgFunction const& ) = default; +#endif + virtual void set( ConfigT& config, std::string const& value ) const = 0; + virtual bool takesArg() const = 0; + virtual IArgFunction* clone() const = 0; + }; + + template + class BoundArgFunction { + public: + BoundArgFunction() : functionObj( CLARA_NULL ) {} + BoundArgFunction( IArgFunction* _functionObj ) : functionObj( _functionObj ) {} + BoundArgFunction( BoundArgFunction const& other ) : functionObj( other.functionObj ? other.functionObj->clone() : CLARA_NULL ) {} + BoundArgFunction& operator = ( BoundArgFunction const& other ) { + IArgFunction* newFunctionObj = other.functionObj ? 
other.functionObj->clone() : CLARA_NULL; + delete functionObj; + functionObj = newFunctionObj; + return *this; + } + ~BoundArgFunction() { delete functionObj; } + + void set( ConfigT& config, std::string const& value ) const { + functionObj->set( config, value ); + } + bool takesArg() const { return functionObj->takesArg(); } + + bool isSet() const { + return functionObj != CLARA_NULL; + } + private: + IArgFunction* functionObj; + }; + + template + struct NullBinder : IArgFunction{ + virtual void set( C&, std::string const& ) const {} + virtual bool takesArg() const { return true; } + virtual IArgFunction* clone() const { return new NullBinder( *this ); } + }; + + template + struct BoundDataMember : IArgFunction{ + BoundDataMember( M C::* _member ) : member( _member ) {} + virtual void set( C& p, std::string const& stringValue ) const { + convertInto( stringValue, p.*member ); + } + virtual bool takesArg() const { return !IsBool::value; } + virtual IArgFunction* clone() const { return new BoundDataMember( *this ); } + M C::* member; + }; + template + struct BoundUnaryMethod : IArgFunction{ + BoundUnaryMethod( void (C::*_member)( M ) ) : member( _member ) {} + virtual void set( C& p, std::string const& stringValue ) const { + typename RemoveConstRef::type value; + convertInto( stringValue, value ); + (p.*member)( value ); + } + virtual bool takesArg() const { return !IsBool::value; } + virtual IArgFunction* clone() const { return new BoundUnaryMethod( *this ); } + void (C::*member)( M ); + }; + template + struct BoundNullaryMethod : IArgFunction{ + BoundNullaryMethod( void (C::*_member)() ) : member( _member ) {} + virtual void set( C& p, std::string const& stringValue ) const { + bool value; + convertInto( stringValue, value ); + if( value ) + (p.*member)(); + } + virtual bool takesArg() const { return false; } + virtual IArgFunction* clone() const { return new BoundNullaryMethod( *this ); } + void (C::*member)(); + }; + + template + struct BoundUnaryFunction : 
IArgFunction{ + BoundUnaryFunction( void (*_function)( C& ) ) : function( _function ) {} + virtual void set( C& obj, std::string const& stringValue ) const { + bool value; + convertInto( stringValue, value ); + if( value ) + function( obj ); + } + virtual bool takesArg() const { return false; } + virtual IArgFunction* clone() const { return new BoundUnaryFunction( *this ); } + void (*function)( C& ); + }; + + template + struct BoundBinaryFunction : IArgFunction{ + BoundBinaryFunction( void (*_function)( C&, T ) ) : function( _function ) {} + virtual void set( C& obj, std::string const& stringValue ) const { + typename RemoveConstRef::type value; + convertInto( stringValue, value ); + function( obj, value ); + } + virtual bool takesArg() const { return !IsBool::value; } + virtual IArgFunction* clone() const { return new BoundBinaryFunction( *this ); } + void (*function)( C&, T ); + }; + + } // namespace Detail + + inline std::vector argsToVector( int argc, char const* const* const argv ) { + std::vector args( static_cast( argc ) ); + for( std::size_t i = 0; i < static_cast( argc ); ++i ) + args[i] = argv[i]; + + return args; + } + + class Parser { + enum Mode { None, MaybeShortOpt, SlashOpt, ShortOpt, LongOpt, Positional }; + Mode mode; + std::size_t from; + bool inQuotes; + public: + + struct Token { + enum Type { Positional, ShortOpt, LongOpt }; + Token( Type _type, std::string const& _data ) : type( _type ), data( _data ) {} + Type type; + std::string data; + }; + + Parser() : mode( None ), from( 0 ), inQuotes( false ){} + + void parseIntoTokens( std::vector const& args, std::vector& tokens ) { + const std::string doubleDash = "--"; + for( std::size_t i = 1; i < args.size() && args[i] != doubleDash; ++i ) + parseIntoTokens( args[i], tokens); + } + + void parseIntoTokens( std::string const& arg, std::vector& tokens ) { + for( std::size_t i = 0; i < arg.size(); ++i ) { + char c = arg[i]; + if( c == '"' ) + inQuotes = !inQuotes; + mode = handleMode( i, c, arg, 
tokens ); + } + mode = handleMode( arg.size(), '\0', arg, tokens ); + } + Mode handleMode( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { + switch( mode ) { + case None: return handleNone( i, c ); + case MaybeShortOpt: return handleMaybeShortOpt( i, c ); + case ShortOpt: + case LongOpt: + case SlashOpt: return handleOpt( i, c, arg, tokens ); + case Positional: return handlePositional( i, c, arg, tokens ); + default: throw std::logic_error( "Unknown mode" ); + } + } + + Mode handleNone( std::size_t i, char c ) { + if( inQuotes ) { + from = i; + return Positional; + } + switch( c ) { + case '-': return MaybeShortOpt; +#ifdef CLARA_PLATFORM_WINDOWS + case '/': from = i+1; return SlashOpt; +#endif + default: from = i; return Positional; + } + } + Mode handleMaybeShortOpt( std::size_t i, char c ) { + switch( c ) { + case '-': from = i+1; return LongOpt; + default: from = i; return ShortOpt; + } + } + + Mode handleOpt( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { + if( std::string( ":=\0", 3 ).find( c ) == std::string::npos ) + return mode; + + std::string optName = arg.substr( from, i-from ); + if( mode == ShortOpt ) + for( std::size_t j = 0; j < optName.size(); ++j ) + tokens.push_back( Token( Token::ShortOpt, optName.substr( j, 1 ) ) ); + else if( mode == SlashOpt && optName.size() == 1 ) + tokens.push_back( Token( Token::ShortOpt, optName ) ); + else + tokens.push_back( Token( Token::LongOpt, optName ) ); + return None; + } + Mode handlePositional( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { + if( inQuotes || std::string( "\0", 1 ).find( c ) == std::string::npos ) + return mode; + + std::string data = arg.substr( from, i-from ); + tokens.push_back( Token( Token::Positional, data ) ); + return None; + } + }; + + template + struct CommonArgProperties { + CommonArgProperties() {} + CommonArgProperties( Detail::BoundArgFunction const& _boundField ) : boundField( _boundField ) {} + + 
Detail::BoundArgFunction boundField; + std::string description; + std::string detail; + std::string placeholder; // Only value if boundField takes an arg + + bool takesArg() const { + return !placeholder.empty(); + } + void validate() const { + if( !boundField.isSet() ) + throw std::logic_error( "option not bound" ); + } + }; + struct OptionArgProperties { + std::vector shortNames; + std::string longName; + + bool hasShortName( std::string const& shortName ) const { + return std::find( shortNames.begin(), shortNames.end(), shortName ) != shortNames.end(); + } + bool hasLongName( std::string const& _longName ) const { + return _longName == longName; + } + }; + struct PositionalArgProperties { + PositionalArgProperties() : position( -1 ) {} + int position; // -1 means non-positional (floating) + + bool isFixedPositional() const { + return position != -1; + } + }; + + template + class CommandLine { + + struct Arg : CommonArgProperties, OptionArgProperties, PositionalArgProperties { + Arg() {} + Arg( Detail::BoundArgFunction const& _boundField ) : CommonArgProperties( _boundField ) {} + + using CommonArgProperties::placeholder; // !TBD + + std::string dbgName() const { + if( !longName.empty() ) + return "--" + longName; + if( !shortNames.empty() ) + return "-" + shortNames[0]; + return "positional args"; + } + std::string commands() const { + std::ostringstream oss; + bool first = true; + std::vector::const_iterator it = shortNames.begin(), itEnd = shortNames.end(); + for(; it != itEnd; ++it ) { + if( first ) + first = false; + else + oss << ", "; + oss << "-" << *it; + } + if( !longName.empty() ) { + if( !first ) + oss << ", "; + oss << "--" << longName; + } + if( !placeholder.empty() ) + oss << " <" << placeholder << ">"; + return oss.str(); + } + }; + + typedef CLARA_AUTO_PTR( Arg ) ArgAutoPtr; + + friend void addOptName( Arg& arg, std::string const& optName ) + { + if( optName.empty() ) + return; + if( Detail::startsWith( optName, "--" ) ) { + if( 
!arg.longName.empty() ) + throw std::logic_error( "Only one long opt may be specified. '" + + arg.longName + + "' already specified, now attempting to add '" + + optName + "'" ); + arg.longName = optName.substr( 2 ); + } + else if( Detail::startsWith( optName, "-" ) ) + arg.shortNames.push_back( optName.substr( 1 ) ); + else + throw std::logic_error( "option must begin with - or --. Option was: '" + optName + "'" ); + } + friend void setPositionalArg( Arg& arg, int position ) + { + arg.position = position; + } + + class ArgBuilder { + public: + ArgBuilder( Arg* arg ) : m_arg( arg ) {} + + // Bind a non-boolean data member (requires placeholder string) + template + void bind( M C::* field, std::string const& placeholder ) { + m_arg->boundField = new Detail::BoundDataMember( field ); + m_arg->placeholder = placeholder; + } + // Bind a boolean data member (no placeholder required) + template + void bind( bool C::* field ) { + m_arg->boundField = new Detail::BoundDataMember( field ); + } + + // Bind a method taking a single, non-boolean argument (requires a placeholder string) + template + void bind( void (C::* unaryMethod)( M ), std::string const& placeholder ) { + m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); + m_arg->placeholder = placeholder; + } + + // Bind a method taking a single, boolean argument (no placeholder string required) + template + void bind( void (C::* unaryMethod)( bool ) ) { + m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); + } + + // Bind a method that takes no arguments (will be called if opt is present) + template + void bind( void (C::* nullaryMethod)() ) { + m_arg->boundField = new Detail::BoundNullaryMethod( nullaryMethod ); + } + + // Bind a free function taking a single argument - the object to operate on (no placeholder string required) + template + void bind( void (* unaryFunction)( C& ) ) { + m_arg->boundField = new Detail::BoundUnaryFunction( unaryFunction ); + } + + // Bind a free function taking a 
single argument - the object to operate on (requires a placeholder string) + template + void bind( void (* binaryFunction)( C&, T ), std::string const& placeholder ) { + m_arg->boundField = new Detail::BoundBinaryFunction( binaryFunction ); + m_arg->placeholder = placeholder; + } + + ArgBuilder& describe( std::string const& description ) { + m_arg->description = description; + return *this; + } + ArgBuilder& detail( std::string const& detail ) { + m_arg->detail = detail; + return *this; + } + + protected: + Arg* m_arg; + }; + + class OptBuilder : public ArgBuilder { + public: + OptBuilder( Arg* arg ) : ArgBuilder( arg ) {} + OptBuilder( OptBuilder& other ) : ArgBuilder( other ) {} + + OptBuilder& operator[]( std::string const& optName ) { + addOptName( *ArgBuilder::m_arg, optName ); + return *this; + } + }; + + public: + + CommandLine() + : m_boundProcessName( new Detail::NullBinder() ), + m_highestSpecifiedArgPosition( 0 ), + m_throwOnUnrecognisedTokens( false ) + {} + CommandLine( CommandLine const& other ) + : m_boundProcessName( other.m_boundProcessName ), + m_options ( other.m_options ), + m_positionalArgs( other.m_positionalArgs ), + m_highestSpecifiedArgPosition( other.m_highestSpecifiedArgPosition ), + m_throwOnUnrecognisedTokens( other.m_throwOnUnrecognisedTokens ) + { + if( other.m_floatingArg.get() ) + m_floatingArg.reset( new Arg( *other.m_floatingArg ) ); + } + + CommandLine& setThrowOnUnrecognisedTokens( bool shouldThrow = true ) { + m_throwOnUnrecognisedTokens = shouldThrow; + return *this; + } + + OptBuilder operator[]( std::string const& optName ) { + m_options.push_back( Arg() ); + addOptName( m_options.back(), optName ); + OptBuilder builder( &m_options.back() ); + return builder; + } + + ArgBuilder operator[]( int position ) { + m_positionalArgs.insert( std::make_pair( position, Arg() ) ); + if( position > m_highestSpecifiedArgPosition ) + m_highestSpecifiedArgPosition = position; + setPositionalArg( m_positionalArgs[position], position ); + 
ArgBuilder builder( &m_positionalArgs[position] ); + return builder; + } + + // Invoke this with the _ instance + ArgBuilder operator[]( UnpositionalTag ) { + if( m_floatingArg.get() ) + throw std::logic_error( "Only one unpositional argument can be added" ); + m_floatingArg.reset( new Arg() ); + ArgBuilder builder( m_floatingArg.get() ); + return builder; + } + + template + void bindProcessName( M C::* field ) { + m_boundProcessName = new Detail::BoundDataMember( field ); + } + template + void bindProcessName( void (C::*_unaryMethod)( M ) ) { + m_boundProcessName = new Detail::BoundUnaryMethod( _unaryMethod ); + } + + void optUsage( std::ostream& os, std::size_t indent = 0, std::size_t width = Detail::consoleWidth ) const { + typename std::vector::const_iterator itBegin = m_options.begin(), itEnd = m_options.end(), it; + std::size_t maxWidth = 0; + for( it = itBegin; it != itEnd; ++it ) + maxWidth = (std::max)( maxWidth, it->commands().size() ); + + for( it = itBegin; it != itEnd; ++it ) { + Detail::Text usage( it->commands(), Detail::TextAttributes() + .setWidth( maxWidth+indent ) + .setIndent( indent ) ); + Detail::Text desc( it->description, Detail::TextAttributes() + .setWidth( width - maxWidth - 3 ) ); + + for( std::size_t i = 0; i < (std::max)( usage.size(), desc.size() ); ++i ) { + std::string usageCol = i < usage.size() ? 
usage[i] : ""; + os << usageCol; + + if( i < desc.size() && !desc[i].empty() ) + os << std::string( indent + 2 + maxWidth - usageCol.size(), ' ' ) + << desc[i]; + os << "\n"; + } + } + } + std::string optUsage() const { + std::ostringstream oss; + optUsage( oss ); + return oss.str(); + } + + void argSynopsis( std::ostream& os ) const { + for( int i = 1; i <= m_highestSpecifiedArgPosition; ++i ) { + if( i > 1 ) + os << " "; + typename std::map::const_iterator it = m_positionalArgs.find( i ); + if( it != m_positionalArgs.end() ) + os << "<" << it->second.placeholder << ">"; + else if( m_floatingArg.get() ) + os << "<" << m_floatingArg->placeholder << ">"; + else + throw std::logic_error( "non consecutive positional arguments with no floating args" ); + } + // !TBD No indication of mandatory args + if( m_floatingArg.get() ) { + if( m_highestSpecifiedArgPosition > 1 ) + os << " "; + os << "[<" << m_floatingArg->placeholder << "> ...]"; + } + } + std::string argSynopsis() const { + std::ostringstream oss; + argSynopsis( oss ); + return oss.str(); + } + + void usage( std::ostream& os, std::string const& procName ) const { + validate(); + os << "usage:\n " << procName << " "; + argSynopsis( os ); + if( !m_options.empty() ) { + os << " [options]\n\nwhere options are: \n"; + optUsage( os, 2 ); + } + os << "\n"; + } + std::string usage( std::string const& procName ) const { + std::ostringstream oss; + usage( oss, procName ); + return oss.str(); + } + + ConfigT parse( std::vector const& args ) const { + ConfigT config; + parseInto( args, config ); + return config; + } + + std::vector parseInto( std::vector const& args, ConfigT& config ) const { + std::string processName = args.empty() ? 
std::string() : args[0]; + std::size_t lastSlash = processName.find_last_of( "/\\" ); + if( lastSlash != std::string::npos ) + processName = processName.substr( lastSlash+1 ); + m_boundProcessName.set( config, processName ); + std::vector tokens; + Parser parser; + parser.parseIntoTokens( args, tokens ); + return populate( tokens, config ); + } + + std::vector populate( std::vector const& tokens, ConfigT& config ) const { + validate(); + std::vector unusedTokens = populateOptions( tokens, config ); + unusedTokens = populateFixedArgs( unusedTokens, config ); + unusedTokens = populateFloatingArgs( unusedTokens, config ); + return unusedTokens; + } + + std::vector populateOptions( std::vector const& tokens, ConfigT& config ) const { + std::vector unusedTokens; + std::vector errors; + for( std::size_t i = 0; i < tokens.size(); ++i ) { + Parser::Token const& token = tokens[i]; + typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); + for(; it != itEnd; ++it ) { + Arg const& arg = *it; + + try { + if( ( token.type == Parser::Token::ShortOpt && arg.hasShortName( token.data ) ) || + ( token.type == Parser::Token::LongOpt && arg.hasLongName( token.data ) ) ) { + if( arg.takesArg() ) { + if( i == tokens.size()-1 || tokens[i+1].type != Parser::Token::Positional ) + errors.push_back( "Expected argument to option: " + token.data ); + else + arg.boundField.set( config, tokens[++i].data ); + } + else { + arg.boundField.set( config, "true" ); + } + break; + } + } + catch( std::exception& ex ) { + errors.push_back( std::string( ex.what() ) + "\n- while parsing: (" + arg.commands() + ")" ); + } + } + if( it == itEnd ) { + if( token.type == Parser::Token::Positional || !m_throwOnUnrecognisedTokens ) + unusedTokens.push_back( token ); + else if( errors.empty() && m_throwOnUnrecognisedTokens ) + errors.push_back( "unrecognised option: " + token.data ); + } + } + if( !errors.empty() ) { + std::ostringstream oss; + for( std::vector::const_iterator it = 
errors.begin(), itEnd = errors.end(); + it != itEnd; + ++it ) { + if( it != errors.begin() ) + oss << "\n"; + oss << *it; + } + throw std::runtime_error( oss.str() ); + } + return unusedTokens; + } + std::vector populateFixedArgs( std::vector const& tokens, ConfigT& config ) const { + std::vector unusedTokens; + int position = 1; + for( std::size_t i = 0; i < tokens.size(); ++i ) { + Parser::Token const& token = tokens[i]; + typename std::map::const_iterator it = m_positionalArgs.find( position ); + if( it != m_positionalArgs.end() ) + it->second.boundField.set( config, token.data ); + else + unusedTokens.push_back( token ); + if( token.type == Parser::Token::Positional ) + position++; + } + return unusedTokens; + } + std::vector populateFloatingArgs( std::vector const& tokens, ConfigT& config ) const { + if( !m_floatingArg.get() ) + return tokens; + std::vector unusedTokens; + for( std::size_t i = 0; i < tokens.size(); ++i ) { + Parser::Token const& token = tokens[i]; + if( token.type == Parser::Token::Positional ) + m_floatingArg->boundField.set( config, token.data ); + else + unusedTokens.push_back( token ); + } + return unusedTokens; + } + + void validate() const + { + if( m_options.empty() && m_positionalArgs.empty() && !m_floatingArg.get() ) + throw std::logic_error( "No options or arguments specified" ); + + for( typename std::vector::const_iterator it = m_options.begin(), + itEnd = m_options.end(); + it != itEnd; ++it ) + it->validate(); + } + + private: + Detail::BoundArgFunction m_boundProcessName; + std::vector m_options; + std::map m_positionalArgs; + ArgAutoPtr m_floatingArg; + int m_highestSpecifiedArgPosition; + bool m_throwOnUnrecognisedTokens; + }; + +} // end namespace Clara + +STITCH_CLARA_CLOSE_NAMESPACE +#undef STITCH_CLARA_OPEN_NAMESPACE +#undef STITCH_CLARA_CLOSE_NAMESPACE + +#endif // TWOBLUECUBES_CLARA_H_INCLUDED +#undef STITCH_CLARA_OPEN_NAMESPACE + +// Restore Clara's value for console width, if present +#ifdef 
CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH +#define CLARA_CONFIG_CONSOLE_WIDTH CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH +#undef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH +#endif + +#include +#include + +namespace Catch { + + inline void abortAfterFirst( ConfigData& config ) { config.abortAfter = 1; } + inline void abortAfterX( ConfigData& config, int x ) { + if( x < 1 ) + throw std::runtime_error( "Value after -x or --abortAfter must be greater than zero" ); + config.abortAfter = x; + } + inline void addTestOrTags( ConfigData& config, std::string const& _testSpec ) { config.testsOrTags.push_back( _testSpec ); } + inline void addSectionToRun( ConfigData& config, std::string const& sectionName ) { config.sectionsToRun.push_back( sectionName ); } + inline void addReporterName( ConfigData& config, std::string const& _reporterName ) { config.reporterNames.push_back( _reporterName ); } + + inline void addWarning( ConfigData& config, std::string const& _warning ) { + if( _warning == "NoAssertions" ) + config.warnings = static_cast( config.warnings | WarnAbout::NoAssertions ); + else + throw std::runtime_error( "Unrecognised warning: '" + _warning + '\'' ); + } + inline void setOrder( ConfigData& config, std::string const& order ) { + if( startsWith( "declared", order ) ) + config.runOrder = RunTests::InDeclarationOrder; + else if( startsWith( "lexical", order ) ) + config.runOrder = RunTests::InLexicographicalOrder; + else if( startsWith( "random", order ) ) + config.runOrder = RunTests::InRandomOrder; + else + throw std::runtime_error( "Unrecognised ordering: '" + order + '\'' ); + } + inline void setRngSeed( ConfigData& config, std::string const& seed ) { + if( seed == "time" ) { + config.rngSeed = static_cast( std::time(0) ); + } + else { + std::stringstream ss; + ss << seed; + ss >> config.rngSeed; + if( ss.fail() ) + throw std::runtime_error( "Argument to --rng-seed should be the word 'time' or a number" ); + } + } + inline void setVerbosity( ConfigData& config, int level ) { + 
// !TBD: accept strings? + config.verbosity = static_cast( level ); + } + inline void setShowDurations( ConfigData& config, bool _showDurations ) { + config.showDurations = _showDurations + ? ShowDurations::Always + : ShowDurations::Never; + } + inline void setUseColour( ConfigData& config, std::string const& value ) { + std::string mode = toLower( value ); + + if( mode == "yes" ) + config.useColour = UseColour::Yes; + else if( mode == "no" ) + config.useColour = UseColour::No; + else if( mode == "auto" ) + config.useColour = UseColour::Auto; + else + throw std::runtime_error( "colour mode must be one of: auto, yes or no" ); + } + inline void forceColour( ConfigData& config ) { + config.useColour = UseColour::Yes; + } + inline void loadTestNamesFromFile( ConfigData& config, std::string const& _filename ) { + std::ifstream f( _filename.c_str() ); + if( !f.is_open() ) + throw std::domain_error( "Unable to load input file: " + _filename ); + + std::string line; + while( std::getline( f, line ) ) { + line = trim(line); + if( !line.empty() && !startsWith( line, '#' ) ) { + if( !startsWith( line, '"' ) ) + line = '"' + line + '"'; + addTestOrTags( config, line + ',' ); + } + } + } + + inline Clara::CommandLine makeCommandLineParser() { + + using namespace Clara; + CommandLine cli; + + cli.bindProcessName( &ConfigData::processName ); + + cli["-?"]["-h"]["--help"] + .describe( "display usage information" ) + .bind( &ConfigData::showHelp ); + + cli["-l"]["--list-tests"] + .describe( "list all/matching test cases" ) + .bind( &ConfigData::listTests ); + + cli["-t"]["--list-tags"] + .describe( "list all/matching tags" ) + .bind( &ConfigData::listTags ); + + cli["-s"]["--success"] + .describe( "include successful tests in output" ) + .bind( &ConfigData::showSuccessfulTests ); + + cli["-b"]["--break"] + .describe( "break into debugger on failure" ) + .bind( &ConfigData::shouldDebugBreak ); + + cli["-e"]["--nothrow"] + .describe( "skip exception tests" ) + .bind( 
&ConfigData::noThrow ); + + cli["-i"]["--invisibles"] + .describe( "show invisibles (tabs, newlines)" ) + .bind( &ConfigData::showInvisibles ); + + cli["-o"]["--out"] + .describe( "output filename" ) + .bind( &ConfigData::outputFilename, "filename" ); + + cli["-r"]["--reporter"] +// .placeholder( "name[:filename]" ) + .describe( "reporter to use (defaults to console)" ) + .bind( &addReporterName, "name" ); + + cli["-n"]["--name"] + .describe( "suite name" ) + .bind( &ConfigData::name, "name" ); + + cli["-a"]["--abort"] + .describe( "abort at first failure" ) + .bind( &abortAfterFirst ); + + cli["-x"]["--abortx"] + .describe( "abort after x failures" ) + .bind( &abortAfterX, "no. failures" ); + + cli["-w"]["--warn"] + .describe( "enable warnings" ) + .bind( &addWarning, "warning name" ); + +// - needs updating if reinstated +// cli.into( &setVerbosity ) +// .describe( "level of verbosity (0=no output)" ) +// .shortOpt( "v") +// .longOpt( "verbosity" ) +// .placeholder( "level" ); + + cli[_] + .describe( "which test or tests to use" ) + .bind( &addTestOrTags, "test name, pattern or tags" ); + + cli["-d"]["--durations"] + .describe( "show test durations" ) + .bind( &setShowDurations, "yes|no" ); + + cli["-f"]["--input-file"] + .describe( "load test names to run from a file" ) + .bind( &loadTestNamesFromFile, "filename" ); + + cli["-#"]["--filenames-as-tags"] + .describe( "adds a tag for the filename" ) + .bind( &ConfigData::filenamesAsTags ); + + cli["-c"]["--section"] + .describe( "specify section to run" ) + .bind( &addSectionToRun, "section name" ); + + // Less common commands which don't have a short form + cli["--list-test-names-only"] + .describe( "list all/matching test cases names only" ) + .bind( &ConfigData::listTestNamesOnly ); + + cli["--list-reporters"] + .describe( "list all reporters" ) + .bind( &ConfigData::listReporters ); + + cli["--order"] + .describe( "test case order (defaults to decl)" ) + .bind( &setOrder, "decl|lex|rand" ); + + 
cli["--rng-seed"] + .describe( "set a specific seed for random numbers" ) + .bind( &setRngSeed, "'time'|number" ); + + cli["--force-colour"] + .describe( "force colourised output (deprecated)" ) + .bind( &forceColour ); + + cli["--use-colour"] + .describe( "should output be colourised" ) + .bind( &setUseColour, "yes|no" ); + + return cli; + } + +} // end namespace Catch + +// #included from: internal/catch_list.hpp +#define TWOBLUECUBES_CATCH_LIST_HPP_INCLUDED + +// #included from: catch_text.h +#define TWOBLUECUBES_CATCH_TEXT_H_INCLUDED + +#define TBC_TEXT_FORMAT_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH + +#define CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE Catch +// #included from: ../external/tbc_text_format.h +// Only use header guard if we are not using an outer namespace +#ifndef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE +# ifdef TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED +# ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED +# define TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED +# endif +# else +# define TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED +# endif +#endif +#ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED +#include +#include +#include + +// Use optional outer namespace +#ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE +namespace CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE { +#endif + +namespace Tbc { + +#ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH + const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH; +#else + const unsigned int consoleWidth = 80; +#endif + + struct TextAttributes { + TextAttributes() + : initialIndent( std::string::npos ), + indent( 0 ), + width( consoleWidth-1 ) + {} + + TextAttributes& setInitialIndent( std::size_t _value ) { initialIndent = _value; return *this; } + TextAttributes& setIndent( std::size_t _value ) { indent = _value; return *this; } + TextAttributes& setWidth( std::size_t _value ) { width = _value; return *this; } + + std::size_t initialIndent; // indent of first line, or npos + std::size_t indent; // indent of subsequent lines, or all if 
initialIndent is npos + std::size_t width; // maximum width of text, including indent. Longer text will wrap + }; + + class Text { + public: + Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() ) + : attr( _attr ) + { + const std::string wrappableBeforeChars = "[({<\t"; + const std::string wrappableAfterChars = "])}>-,./|\\"; + const std::string wrappableInsteadOfChars = " \n\r"; + std::string indent = _attr.initialIndent != std::string::npos + ? std::string( _attr.initialIndent, ' ' ) + : std::string( _attr.indent, ' ' ); + + typedef std::string::const_iterator iterator; + iterator it = _str.begin(); + const iterator strEnd = _str.end(); + + while( it != strEnd ) { + + if( lines.size() >= 1000 ) { + lines.push_back( "... message truncated due to excessive size" ); + return; + } + + std::string suffix; + std::size_t width = (std::min)( static_cast( strEnd-it ), _attr.width-static_cast( indent.size() ) ); + iterator itEnd = it+width; + iterator itNext = _str.end(); + + iterator itNewLine = std::find( it, itEnd, '\n' ); + if( itNewLine != itEnd ) + itEnd = itNewLine; + + if( itEnd != strEnd ) { + bool foundWrapPoint = false; + iterator findIt = itEnd; + do { + if( wrappableAfterChars.find( *findIt ) != std::string::npos && findIt != itEnd ) { + itEnd = findIt+1; + itNext = findIt+1; + foundWrapPoint = true; + } + else if( findIt > it && wrappableBeforeChars.find( *findIt ) != std::string::npos ) { + itEnd = findIt; + itNext = findIt; + foundWrapPoint = true; + } + else if( wrappableInsteadOfChars.find( *findIt ) != std::string::npos ) { + itNext = findIt+1; + itEnd = findIt; + foundWrapPoint = true; + } + if( findIt == it ) + break; + else + --findIt; + } + while( !foundWrapPoint ); + + if( !foundWrapPoint ) { + // No good wrap char, so we'll break mid word and add a hyphen + --itEnd; + itNext = itEnd; + suffix = "-"; + } + else { + while( itEnd > it && wrappableInsteadOfChars.find( *(itEnd-1) ) != std::string::npos ) + --itEnd; + } + } + 
lines.push_back( indent + std::string( it, itEnd ) + suffix ); + + if( indent.size() != _attr.indent ) + indent = std::string( _attr.indent, ' ' ); + it = itNext; + } + } + + typedef std::vector::const_iterator const_iterator; + + const_iterator begin() const { return lines.begin(); } + const_iterator end() const { return lines.end(); } + std::string const& last() const { return lines.back(); } + std::size_t size() const { return lines.size(); } + std::string const& operator[]( std::size_t _index ) const { return lines[_index]; } + std::string toString() const { + std::ostringstream oss; + oss << *this; + return oss.str(); + } + + inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) { + for( Text::const_iterator it = _text.begin(), itEnd = _text.end(); + it != itEnd; ++it ) { + if( it != _text.begin() ) + _stream << "\n"; + _stream << *it; + } + return _stream; + } + + private: + std::string str; + TextAttributes attr; + std::vector lines; + }; + +} // end namespace Tbc + +#ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE +} // end outer namespace +#endif + +#endif // TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED +#undef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE + +namespace Catch { + using Tbc::Text; + using Tbc::TextAttributes; +} + +// #included from: catch_console_colour.hpp +#define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_HPP_INCLUDED + +namespace Catch { + + struct Colour { + enum Code { + None = 0, + + White, + Red, + Green, + Blue, + Cyan, + Yellow, + Grey, + + Bright = 0x10, + + BrightRed = Bright | Red, + BrightGreen = Bright | Green, + LightGrey = Bright | Grey, + BrightWhite = Bright | White, + + // By intention + FileName = LightGrey, + Warning = Yellow, + ResultError = BrightRed, + ResultSuccess = BrightGreen, + ResultExpectedFailure = Warning, + + Error = BrightRed, + Success = Green, + + OriginalExpression = Cyan, + ReconstructedExpression = Yellow, + + SecondaryText = LightGrey, + Headers = White + }; + + // Use constructed object 
for RAII guard + Colour( Code _colourCode ); + Colour( Colour const& other ); + ~Colour(); + + // Use static method for one-shot changes + static void use( Code _colourCode ); + + private: + bool m_moved; + }; + + inline std::ostream& operator << ( std::ostream& os, Colour const& ) { return os; } + +} // end namespace Catch + +// #included from: catch_interfaces_reporter.h +#define TWOBLUECUBES_CATCH_INTERFACES_REPORTER_H_INCLUDED + +#include +#include +#include + +namespace Catch +{ + struct ReporterConfig { + explicit ReporterConfig( Ptr const& _fullConfig ) + : m_stream( &_fullConfig->stream() ), m_fullConfig( _fullConfig ) {} + + ReporterConfig( Ptr const& _fullConfig, std::ostream& _stream ) + : m_stream( &_stream ), m_fullConfig( _fullConfig ) {} + + std::ostream& stream() const { return *m_stream; } + Ptr fullConfig() const { return m_fullConfig; } + + private: + std::ostream* m_stream; + Ptr m_fullConfig; + }; + + struct ReporterPreferences { + ReporterPreferences() + : shouldRedirectStdOut( false ) + {} + + bool shouldRedirectStdOut; + }; + + template + struct LazyStat : Option { + LazyStat() : used( false ) {} + LazyStat& operator=( T const& _value ) { + Option::operator=( _value ); + used = false; + return *this; + } + void reset() { + Option::reset(); + used = false; + } + bool used; + }; + + struct TestRunInfo { + TestRunInfo( std::string const& _name ) : name( _name ) {} + std::string name; + }; + struct GroupInfo { + GroupInfo( std::string const& _name, + std::size_t _groupIndex, + std::size_t _groupsCount ) + : name( _name ), + groupIndex( _groupIndex ), + groupsCounts( _groupsCount ) + {} + + std::string name; + std::size_t groupIndex; + std::size_t groupsCounts; + }; + + struct AssertionStats { + AssertionStats( AssertionResult const& _assertionResult, + std::vector const& _infoMessages, + Totals const& _totals ) + : assertionResult( _assertionResult ), + infoMessages( _infoMessages ), + totals( _totals ) + { + if( assertionResult.hasMessage() ) { 
+ // Copy message into messages list. + // !TBD This should have been done earlier, somewhere + MessageBuilder builder( assertionResult.getTestMacroName(), assertionResult.getSourceInfo(), assertionResult.getResultType() ); + builder << assertionResult.getMessage(); + builder.m_info.message = builder.m_stream.str(); + + infoMessages.push_back( builder.m_info ); + } + } + virtual ~AssertionStats(); + +# ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + AssertionStats( AssertionStats const& ) = default; + AssertionStats( AssertionStats && ) = default; + AssertionStats& operator = ( AssertionStats const& ) = default; + AssertionStats& operator = ( AssertionStats && ) = default; +# endif + + AssertionResult assertionResult; + std::vector infoMessages; + Totals totals; + }; + + struct SectionStats { + SectionStats( SectionInfo const& _sectionInfo, + Counts const& _assertions, + double _durationInSeconds, + bool _missingAssertions ) + : sectionInfo( _sectionInfo ), + assertions( _assertions ), + durationInSeconds( _durationInSeconds ), + missingAssertions( _missingAssertions ) + {} + virtual ~SectionStats(); +# ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + SectionStats( SectionStats const& ) = default; + SectionStats( SectionStats && ) = default; + SectionStats& operator = ( SectionStats const& ) = default; + SectionStats& operator = ( SectionStats && ) = default; +# endif + + SectionInfo sectionInfo; + Counts assertions; + double durationInSeconds; + bool missingAssertions; + }; + + struct TestCaseStats { + TestCaseStats( TestCaseInfo const& _testInfo, + Totals const& _totals, + std::string const& _stdOut, + std::string const& _stdErr, + bool _aborting ) + : testInfo( _testInfo ), + totals( _totals ), + stdOut( _stdOut ), + stdErr( _stdErr ), + aborting( _aborting ) + {} + virtual ~TestCaseStats(); + +# ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + TestCaseStats( TestCaseStats const& ) = default; + TestCaseStats( TestCaseStats && ) = default; + TestCaseStats& operator = ( 
TestCaseStats const& ) = default; + TestCaseStats& operator = ( TestCaseStats && ) = default; +# endif + + TestCaseInfo testInfo; + Totals totals; + std::string stdOut; + std::string stdErr; + bool aborting; + }; + + struct TestGroupStats { + TestGroupStats( GroupInfo const& _groupInfo, + Totals const& _totals, + bool _aborting ) + : groupInfo( _groupInfo ), + totals( _totals ), + aborting( _aborting ) + {} + TestGroupStats( GroupInfo const& _groupInfo ) + : groupInfo( _groupInfo ), + aborting( false ) + {} + virtual ~TestGroupStats(); + +# ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS + TestGroupStats( TestGroupStats const& ) = default; + TestGroupStats( TestGroupStats && ) = default; + TestGroupStats& operator = ( TestGroupStats const& ) = default; + TestGroupStats& operator = ( TestGroupStats && ) = default; +# endif + + GroupInfo groupInfo; + Totals totals; + bool aborting; + }; + + struct TestRunStats { + TestRunStats( TestRunInfo const& _runInfo, + Totals const& _totals, + bool _aborting ) + : runInfo( _runInfo ), + totals( _totals ), + aborting( _aborting ) + {} + virtual ~TestRunStats(); + +# ifndef CATCH_CONFIG_CPP11_GENERATED_METHODS + TestRunStats( TestRunStats const& _other ) + : runInfo( _other.runInfo ), + totals( _other.totals ), + aborting( _other.aborting ) + {} +# else + TestRunStats( TestRunStats const& ) = default; + TestRunStats( TestRunStats && ) = default; + TestRunStats& operator = ( TestRunStats const& ) = default; + TestRunStats& operator = ( TestRunStats && ) = default; +# endif + + TestRunInfo runInfo; + Totals totals; + bool aborting; + }; + + class MultipleReporters; + + struct IStreamingReporter : IShared { + virtual ~IStreamingReporter(); + + // Implementing class must also provide the following static method: + // static std::string getDescription(); + + virtual ReporterPreferences getPreferences() const = 0; + + virtual void noMatchingTestCases( std::string const& spec ) = 0; + + virtual void testRunStarting( TestRunInfo const& 
testRunInfo ) = 0; + virtual void testGroupStarting( GroupInfo const& groupInfo ) = 0; + + virtual void testCaseStarting( TestCaseInfo const& testInfo ) = 0; + virtual void sectionStarting( SectionInfo const& sectionInfo ) = 0; + + virtual void assertionStarting( AssertionInfo const& assertionInfo ) = 0; + + // The return value indicates if the messages buffer should be cleared: + virtual bool assertionEnded( AssertionStats const& assertionStats ) = 0; + + virtual void sectionEnded( SectionStats const& sectionStats ) = 0; + virtual void testCaseEnded( TestCaseStats const& testCaseStats ) = 0; + virtual void testGroupEnded( TestGroupStats const& testGroupStats ) = 0; + virtual void testRunEnded( TestRunStats const& testRunStats ) = 0; + + virtual void skipTest( TestCaseInfo const& testInfo ) = 0; + + virtual MultipleReporters* tryAsMulti() { return CATCH_NULL; } + }; + + struct IReporterFactory : IShared { + virtual ~IReporterFactory(); + virtual IStreamingReporter* create( ReporterConfig const& config ) const = 0; + virtual std::string getDescription() const = 0; + }; + + struct IReporterRegistry { + typedef std::map > FactoryMap; + typedef std::vector > Listeners; + + virtual ~IReporterRegistry(); + virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const = 0; + virtual FactoryMap const& getFactories() const = 0; + virtual Listeners const& getListeners() const = 0; + }; + + Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ); + +} + +#include +#include + +namespace Catch { + + inline std::size_t listTests( Config const& config ) { + + TestSpec testSpec = config.testSpec(); + if( config.testSpec().hasFilters() ) + Catch::cout() << "Matching test cases:\n"; + else { + Catch::cout() << "All available test cases:\n"; + testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); + } + + std::size_t matchedTests = 0; + TextAttributes nameAttr, tagsAttr; + nameAttr.setInitialIndent( 2 ).setIndent( 
4 ); + tagsAttr.setIndent( 6 ); + + std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); + for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); + it != itEnd; + ++it ) { + matchedTests++; + TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); + Colour::Code colour = testCaseInfo.isHidden() + ? Colour::SecondaryText + : Colour::None; + Colour colourGuard( colour ); + + Catch::cout() << Text( testCaseInfo.name, nameAttr ) << std::endl; + if( !testCaseInfo.tags.empty() ) + Catch::cout() << Text( testCaseInfo.tagsAsString, tagsAttr ) << std::endl; + } + + if( !config.testSpec().hasFilters() ) + Catch::cout() << pluralise( matchedTests, "test case" ) << '\n' << std::endl; + else + Catch::cout() << pluralise( matchedTests, "matching test case" ) << '\n' << std::endl; + return matchedTests; + } + + inline std::size_t listTestsNamesOnly( Config const& config ) { + TestSpec testSpec = config.testSpec(); + if( !config.testSpec().hasFilters() ) + testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); + std::size_t matchedTests = 0; + std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); + for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); + it != itEnd; + ++it ) { + matchedTests++; + TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); + if( startsWith( testCaseInfo.name, '#' ) ) + Catch::cout() << '"' << testCaseInfo.name << '"' << std::endl; + else + Catch::cout() << testCaseInfo.name << std::endl; + } + return matchedTests; + } + + struct TagInfo { + TagInfo() : count ( 0 ) {} + void add( std::string const& spelling ) { + ++count; + spellings.insert( spelling ); + } + std::string all() const { + std::string out; + for( std::set::const_iterator it = spellings.begin(), itEnd = spellings.end(); + it != itEnd; + ++it ) + out += "[" + *it + "]"; + return out; + } + 
std::set spellings; + std::size_t count; + }; + + inline std::size_t listTags( Config const& config ) { + TestSpec testSpec = config.testSpec(); + if( config.testSpec().hasFilters() ) + Catch::cout() << "Tags for matching test cases:\n"; + else { + Catch::cout() << "All available tags:\n"; + testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); + } + + std::map tagCounts; + + std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); + for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); + it != itEnd; + ++it ) { + for( std::set::const_iterator tagIt = it->getTestCaseInfo().tags.begin(), + tagItEnd = it->getTestCaseInfo().tags.end(); + tagIt != tagItEnd; + ++tagIt ) { + std::string tagName = *tagIt; + std::string lcaseTagName = toLower( tagName ); + std::map::iterator countIt = tagCounts.find( lcaseTagName ); + if( countIt == tagCounts.end() ) + countIt = tagCounts.insert( std::make_pair( lcaseTagName, TagInfo() ) ).first; + countIt->second.add( tagName ); + } + } + + for( std::map::const_iterator countIt = tagCounts.begin(), + countItEnd = tagCounts.end(); + countIt != countItEnd; + ++countIt ) { + std::ostringstream oss; + oss << " " << std::setw(2) << countIt->second.count << " "; + Text wrapper( countIt->second.all(), TextAttributes() + .setInitialIndent( 0 ) + .setIndent( oss.str().size() ) + .setWidth( CATCH_CONFIG_CONSOLE_WIDTH-10 ) ); + Catch::cout() << oss.str() << wrapper << '\n'; + } + Catch::cout() << pluralise( tagCounts.size(), "tag" ) << '\n' << std::endl; + return tagCounts.size(); + } + + inline std::size_t listReporters( Config const& /*config*/ ) { + Catch::cout() << "Available reporters:\n"; + IReporterRegistry::FactoryMap const& factories = getRegistryHub().getReporterRegistry().getFactories(); + IReporterRegistry::FactoryMap::const_iterator itBegin = factories.begin(), itEnd = factories.end(), it; + std::size_t maxNameLen = 0; + for(it = 
itBegin; it != itEnd; ++it ) + maxNameLen = (std::max)( maxNameLen, it->first.size() ); + + for(it = itBegin; it != itEnd; ++it ) { + Text wrapper( it->second->getDescription(), TextAttributes() + .setInitialIndent( 0 ) + .setIndent( 7+maxNameLen ) + .setWidth( CATCH_CONFIG_CONSOLE_WIDTH - maxNameLen-8 ) ); + Catch::cout() << " " + << it->first + << ':' + << std::string( maxNameLen - it->first.size() + 2, ' ' ) + << wrapper << '\n'; + } + Catch::cout() << std::endl; + return factories.size(); + } + + inline Option list( Config const& config ) { + Option listedCount; + if( config.listTests() ) + listedCount = listedCount.valueOr(0) + listTests( config ); + if( config.listTestNamesOnly() ) + listedCount = listedCount.valueOr(0) + listTestsNamesOnly( config ); + if( config.listTags() ) + listedCount = listedCount.valueOr(0) + listTags( config ); + if( config.listReporters() ) + listedCount = listedCount.valueOr(0) + listReporters( config ); + return listedCount; + } + +} // end namespace Catch + +// #included from: internal/catch_run_context.hpp +#define TWOBLUECUBES_CATCH_RUNNER_IMPL_HPP_INCLUDED + +// #included from: catch_test_case_tracker.hpp +#define TWOBLUECUBES_CATCH_TEST_CASE_TRACKER_HPP_INCLUDED + +#include +#include +#include +#include +#include + +CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS + +namespace Catch { +namespace TestCaseTracking { + + struct NameAndLocation { + std::string name; + SourceLineInfo location; + + NameAndLocation( std::string const& _name, SourceLineInfo const& _location ) + : name( _name ), + location( _location ) + {} + }; + + struct ITracker : SharedImpl<> { + virtual ~ITracker(); + + // static queries + virtual NameAndLocation const& nameAndLocation() const = 0; + + // dynamic queries + virtual bool isComplete() const = 0; // Successfully completed or failed + virtual bool isSuccessfullyCompleted() const = 0; + virtual bool isOpen() const = 0; // Started but not complete + virtual bool hasChildren() const = 0; + + virtual ITracker& 
parent() = 0; + + // actions + virtual void close() = 0; // Successfully complete + virtual void fail() = 0; + virtual void markAsNeedingAnotherRun() = 0; + + virtual void addChild( Ptr const& child ) = 0; + virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) = 0; + virtual void openChild() = 0; + + // Debug/ checking + virtual bool isSectionTracker() const = 0; + virtual bool isIndexTracker() const = 0; + }; + + class TrackerContext { + + enum RunState { + NotStarted, + Executing, + CompletedCycle + }; + + Ptr m_rootTracker; + ITracker* m_currentTracker; + RunState m_runState; + + public: + + static TrackerContext& instance() { + static TrackerContext s_instance; + return s_instance; + } + + TrackerContext() + : m_currentTracker( CATCH_NULL ), + m_runState( NotStarted ) + {} + + ITracker& startRun(); + + void endRun() { + m_rootTracker.reset(); + m_currentTracker = CATCH_NULL; + m_runState = NotStarted; + } + + void startCycle() { + m_currentTracker = m_rootTracker.get(); + m_runState = Executing; + } + void completeCycle() { + m_runState = CompletedCycle; + } + + bool completedCycle() const { + return m_runState == CompletedCycle; + } + ITracker& currentTracker() { + return *m_currentTracker; + } + void setCurrentTracker( ITracker* tracker ) { + m_currentTracker = tracker; + } + }; + + class TrackerBase : public ITracker { + protected: + enum CycleState { + NotStarted, + Executing, + ExecutingChildren, + NeedsAnotherRun, + CompletedSuccessfully, + Failed + }; + class TrackerHasName { + NameAndLocation m_nameAndLocation; + public: + TrackerHasName( NameAndLocation const& nameAndLocation ) : m_nameAndLocation( nameAndLocation ) {} + bool operator ()( Ptr const& tracker ) { + return + tracker->nameAndLocation().name == m_nameAndLocation.name && + tracker->nameAndLocation().location == m_nameAndLocation.location; + } + }; + typedef std::vector > Children; + NameAndLocation m_nameAndLocation; + TrackerContext& m_ctx; + ITracker* m_parent; + Children 
m_children; + CycleState m_runState; + public: + TrackerBase( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) + : m_nameAndLocation( nameAndLocation ), + m_ctx( ctx ), + m_parent( parent ), + m_runState( NotStarted ) + {} + virtual ~TrackerBase(); + + virtual NameAndLocation const& nameAndLocation() const CATCH_OVERRIDE { + return m_nameAndLocation; + } + virtual bool isComplete() const CATCH_OVERRIDE { + return m_runState == CompletedSuccessfully || m_runState == Failed; + } + virtual bool isSuccessfullyCompleted() const CATCH_OVERRIDE { + return m_runState == CompletedSuccessfully; + } + virtual bool isOpen() const CATCH_OVERRIDE { + return m_runState != NotStarted && !isComplete(); + } + virtual bool hasChildren() const CATCH_OVERRIDE { + return !m_children.empty(); + } + + virtual void addChild( Ptr const& child ) CATCH_OVERRIDE { + m_children.push_back( child ); + } + + virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) CATCH_OVERRIDE { + Children::const_iterator it = std::find_if( m_children.begin(), m_children.end(), TrackerHasName( nameAndLocation ) ); + return( it != m_children.end() ) + ? it->get() + : CATCH_NULL; + } + virtual ITracker& parent() CATCH_OVERRIDE { + assert( m_parent ); // Should always be non-null except for root + return *m_parent; + } + + virtual void openChild() CATCH_OVERRIDE { + if( m_runState != ExecutingChildren ) { + m_runState = ExecutingChildren; + if( m_parent ) + m_parent->openChild(); + } + } + + virtual bool isSectionTracker() const CATCH_OVERRIDE { return false; } + virtual bool isIndexTracker() const CATCH_OVERRIDE { return false; } + + void open() { + m_runState = Executing; + moveToThis(); + if( m_parent ) + m_parent->openChild(); + } + + virtual void close() CATCH_OVERRIDE { + + // Close any still open children (e.g. 
generators) + while( &m_ctx.currentTracker() != this ) + m_ctx.currentTracker().close(); + + switch( m_runState ) { + case NotStarted: + case CompletedSuccessfully: + case Failed: + throw std::logic_error( "Illogical state" ); + + case NeedsAnotherRun: + break;; + + case Executing: + m_runState = CompletedSuccessfully; + break; + case ExecutingChildren: + if( m_children.empty() || m_children.back()->isComplete() ) + m_runState = CompletedSuccessfully; + break; + + default: + throw std::logic_error( "Unexpected state" ); + } + moveToParent(); + m_ctx.completeCycle(); + } + virtual void fail() CATCH_OVERRIDE { + m_runState = Failed; + if( m_parent ) + m_parent->markAsNeedingAnotherRun(); + moveToParent(); + m_ctx.completeCycle(); + } + virtual void markAsNeedingAnotherRun() CATCH_OVERRIDE { + m_runState = NeedsAnotherRun; + } + private: + void moveToParent() { + assert( m_parent ); + m_ctx.setCurrentTracker( m_parent ); + } + void moveToThis() { + m_ctx.setCurrentTracker( this ); + } + }; + + class SectionTracker : public TrackerBase { + std::vector m_filters; + public: + SectionTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) + : TrackerBase( nameAndLocation, ctx, parent ) + { + if( parent ) { + while( !parent->isSectionTracker() ) + parent = &parent->parent(); + + SectionTracker& parentSection = static_cast( *parent ); + addNextFilters( parentSection.m_filters ); + } + } + virtual ~SectionTracker(); + + virtual bool isSectionTracker() const CATCH_OVERRIDE { return true; } + + static SectionTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation ) { + SectionTracker* section = CATCH_NULL; + + ITracker& currentTracker = ctx.currentTracker(); + if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { + assert( childTracker ); + assert( childTracker->isSectionTracker() ); + section = static_cast( childTracker ); + } + else { + section = new SectionTracker( nameAndLocation, ctx, ¤tTracker 
); + currentTracker.addChild( section ); + } + if( !ctx.completedCycle() ) + section->tryOpen(); + return *section; + } + + void tryOpen() { + if( !isComplete() && (m_filters.empty() || m_filters[0].empty() || m_filters[0] == m_nameAndLocation.name ) ) + open(); + } + + void addInitialFilters( std::vector const& filters ) { + if( !filters.empty() ) { + m_filters.push_back(""); // Root - should never be consulted + m_filters.push_back(""); // Test Case - not a section filter + m_filters.insert( m_filters.end(), filters.begin(), filters.end() ); + } + } + void addNextFilters( std::vector const& filters ) { + if( filters.size() > 1 ) + m_filters.insert( m_filters.end(), ++filters.begin(), filters.end() ); + } + }; + + class IndexTracker : public TrackerBase { + int m_size; + int m_index; + public: + IndexTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent, int size ) + : TrackerBase( nameAndLocation, ctx, parent ), + m_size( size ), + m_index( -1 ) + {} + virtual ~IndexTracker(); + + virtual bool isIndexTracker() const CATCH_OVERRIDE { return true; } + + static IndexTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation, int size ) { + IndexTracker* tracker = CATCH_NULL; + + ITracker& currentTracker = ctx.currentTracker(); + if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { + assert( childTracker ); + assert( childTracker->isIndexTracker() ); + tracker = static_cast( childTracker ); + } + else { + tracker = new IndexTracker( nameAndLocation, ctx, ¤tTracker, size ); + currentTracker.addChild( tracker ); + } + + if( !ctx.completedCycle() && !tracker->isComplete() ) { + if( tracker->m_runState != ExecutingChildren && tracker->m_runState != NeedsAnotherRun ) + tracker->moveNext(); + tracker->open(); + } + + return *tracker; + } + + int index() const { return m_index; } + + void moveNext() { + m_index++; + m_children.clear(); + } + + virtual void close() CATCH_OVERRIDE { + 
TrackerBase::close(); + if( m_runState == CompletedSuccessfully && m_index < m_size-1 ) + m_runState = Executing; + } + }; + + inline ITracker& TrackerContext::startRun() { + m_rootTracker = new SectionTracker( NameAndLocation( "{root}", CATCH_INTERNAL_LINEINFO ), *this, CATCH_NULL ); + m_currentTracker = CATCH_NULL; + m_runState = Executing; + return *m_rootTracker; + } + +} // namespace TestCaseTracking + +using TestCaseTracking::ITracker; +using TestCaseTracking::TrackerContext; +using TestCaseTracking::SectionTracker; +using TestCaseTracking::IndexTracker; + +} // namespace Catch + +CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS + +// #included from: catch_fatal_condition.hpp +#define TWOBLUECUBES_CATCH_FATAL_CONDITION_H_INCLUDED + +namespace Catch { + + // Report the error condition + inline void reportFatal( std::string const& message ) { + IContext& context = Catch::getCurrentContext(); + IResultCapture* resultCapture = context.getResultCapture(); + resultCapture->handleFatalErrorCondition( message ); + } + +} // namespace Catch + +#if defined ( CATCH_PLATFORM_WINDOWS ) ///////////////////////////////////////// +// #included from: catch_windows_h_proxy.h + +#define TWOBLUECUBES_CATCH_WINDOWS_H_PROXY_H_INCLUDED + +#ifdef CATCH_DEFINES_NOMINMAX +# define NOMINMAX +#endif +#ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +#endif + +#ifdef __AFXDLL +#include +#else +#include +#endif + +#ifdef CATCH_DEFINES_NOMINMAX +# undef NOMINMAX +#endif +#ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN +# undef WIN32_LEAN_AND_MEAN +#endif + + +# if !defined ( CATCH_CONFIG_WINDOWS_SEH ) + +namespace Catch { + struct FatalConditionHandler { + void reset() {} + }; +} + +# else // CATCH_CONFIG_WINDOWS_SEH is defined + +namespace Catch { + + struct SignalDefs { DWORD id; const char* name; }; + extern SignalDefs signalDefs[]; + // There is no 1-1 mapping between signals and windows exceptions. 
+ // Windows can easily distinguish between SO and SigSegV, + // but SigInt, SigTerm, etc are handled differently. + SignalDefs signalDefs[] = { + { EXCEPTION_ILLEGAL_INSTRUCTION, "SIGILL - Illegal instruction signal" }, + { EXCEPTION_STACK_OVERFLOW, "SIGSEGV - Stack overflow" }, + { EXCEPTION_ACCESS_VIOLATION, "SIGSEGV - Segmentation violation signal" }, + { EXCEPTION_INT_DIVIDE_BY_ZERO, "Divide by zero error" }, + }; + + struct FatalConditionHandler { + + static LONG CALLBACK handleVectoredException(PEXCEPTION_POINTERS ExceptionInfo) { + for (int i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { + if (ExceptionInfo->ExceptionRecord->ExceptionCode == signalDefs[i].id) { + reportFatal(signalDefs[i].name); + } + } + // If its not an exception we care about, pass it along. + // This stops us from eating debugger breaks etc. + return EXCEPTION_CONTINUE_SEARCH; + } + + FatalConditionHandler() { + isSet = true; + // 32k seems enough for Catch to handle stack overflow, + // but the value was found experimentally, so there is no strong guarantee + guaranteeSize = 32 * 1024; + exceptionHandlerHandle = CATCH_NULL; + // Register as first handler in current chain + exceptionHandlerHandle = AddVectoredExceptionHandler(1, handleVectoredException); + // Pass in guarantee size to be filled + SetThreadStackGuarantee(&guaranteeSize); + } + + static void reset() { + if (isSet) { + // Unregister handler and restore the old guarantee + RemoveVectoredExceptionHandler(exceptionHandlerHandle); + SetThreadStackGuarantee(&guaranteeSize); + exceptionHandlerHandle = CATCH_NULL; + isSet = false; + } + } + + ~FatalConditionHandler() { + reset(); + } + private: + static bool isSet; + static ULONG guaranteeSize; + static PVOID exceptionHandlerHandle; + }; + + bool FatalConditionHandler::isSet = false; + ULONG FatalConditionHandler::guaranteeSize = 0; + PVOID FatalConditionHandler::exceptionHandlerHandle = CATCH_NULL; + +} // namespace Catch + +# endif // CATCH_CONFIG_WINDOWS_SEH + 
+#else // Not Windows - assumed to be POSIX compatible ////////////////////////// + +# if !defined(CATCH_CONFIG_POSIX_SIGNALS) + +namespace Catch { + struct FatalConditionHandler { + void reset() {} + }; +} + +# else // CATCH_CONFIG_POSIX_SIGNALS is defined + +#include + +namespace Catch { + + struct SignalDefs { + int id; + const char* name; + }; + extern SignalDefs signalDefs[]; + SignalDefs signalDefs[] = { + { SIGINT, "SIGINT - Terminal interrupt signal" }, + { SIGILL, "SIGILL - Illegal instruction signal" }, + { SIGFPE, "SIGFPE - Floating point error signal" }, + { SIGSEGV, "SIGSEGV - Segmentation violation signal" }, + { SIGTERM, "SIGTERM - Termination request signal" }, + { SIGABRT, "SIGABRT - Abort (abnormal termination) signal" } + }; + + struct FatalConditionHandler { + + static bool isSet; + static struct sigaction oldSigActions [sizeof(signalDefs)/sizeof(SignalDefs)]; + static stack_t oldSigStack; + static char altStackMem[SIGSTKSZ]; + + static void handleSignal( int sig ) { + std::string name = ""; + for (std::size_t i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { + SignalDefs &def = signalDefs[i]; + if (sig == def.id) { + name = def.name; + break; + } + } + reset(); + reportFatal(name); + raise( sig ); + } + + FatalConditionHandler() { + isSet = true; + stack_t sigStack; + sigStack.ss_sp = altStackMem; + sigStack.ss_size = SIGSTKSZ; + sigStack.ss_flags = 0; + sigaltstack(&sigStack, &oldSigStack); + struct sigaction sa = { 0 }; + + sa.sa_handler = handleSignal; + sa.sa_flags = SA_ONSTACK; + for (std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i) { + sigaction(signalDefs[i].id, &sa, &oldSigActions[i]); + } + } + + ~FatalConditionHandler() { + reset(); + } + static void reset() { + if( isSet ) { + // Set signals back to previous values -- hopefully nobody overwrote them in the meantime + for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i ) { + sigaction(signalDefs[i].id, &oldSigActions[i], CATCH_NULL); + } + 
// Return the old stack + sigaltstack(&oldSigStack, CATCH_NULL); + isSet = false; + } + } + }; + + bool FatalConditionHandler::isSet = false; + struct sigaction FatalConditionHandler::oldSigActions[sizeof(signalDefs)/sizeof(SignalDefs)] = {}; + stack_t FatalConditionHandler::oldSigStack = {}; + char FatalConditionHandler::altStackMem[SIGSTKSZ] = {}; + +} // namespace Catch + +# endif // CATCH_CONFIG_POSIX_SIGNALS + +#endif // not Windows + +#include +#include + +namespace Catch { + + class StreamRedirect { + + public: + StreamRedirect( std::ostream& stream, std::string& targetString ) + : m_stream( stream ), + m_prevBuf( stream.rdbuf() ), + m_targetString( targetString ) + { + stream.rdbuf( m_oss.rdbuf() ); + } + + ~StreamRedirect() { + m_targetString += m_oss.str(); + m_stream.rdbuf( m_prevBuf ); + } + + private: + std::ostream& m_stream; + std::streambuf* m_prevBuf; + std::ostringstream m_oss; + std::string& m_targetString; + }; + + /////////////////////////////////////////////////////////////////////////// + + class RunContext : public IResultCapture, public IRunner { + + RunContext( RunContext const& ); + void operator =( RunContext const& ); + + public: + + explicit RunContext( Ptr const& _config, Ptr const& reporter ) + : m_runInfo( _config->name() ), + m_context( getCurrentMutableContext() ), + m_activeTestCase( CATCH_NULL ), + m_config( _config ), + m_reporter( reporter ), + m_shouldReportUnexpected ( true ) + { + m_context.setRunner( this ); + m_context.setConfig( m_config ); + m_context.setResultCapture( this ); + m_reporter->testRunStarting( m_runInfo ); + } + + virtual ~RunContext() { + m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, aborting() ) ); + } + + void testGroupStarting( std::string const& testSpec, std::size_t groupIndex, std::size_t groupsCount ) { + m_reporter->testGroupStarting( GroupInfo( testSpec, groupIndex, groupsCount ) ); + } + void testGroupEnded( std::string const& testSpec, Totals const& totals, std::size_t 
groupIndex, std::size_t groupsCount ) { + m_reporter->testGroupEnded( TestGroupStats( GroupInfo( testSpec, groupIndex, groupsCount ), totals, aborting() ) ); + } + + Totals runTest( TestCase const& testCase ) { + Totals prevTotals = m_totals; + + std::string redirectedCout; + std::string redirectedCerr; + + TestCaseInfo testInfo = testCase.getTestCaseInfo(); + + m_reporter->testCaseStarting( testInfo ); + + m_activeTestCase = &testCase; + + do { + ITracker& rootTracker = m_trackerContext.startRun(); + assert( rootTracker.isSectionTracker() ); + static_cast( rootTracker ).addInitialFilters( m_config->getSectionsToRun() ); + do { + m_trackerContext.startCycle(); + m_testCaseTracker = &SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( testInfo.name, testInfo.lineInfo ) ); + runCurrentTest( redirectedCout, redirectedCerr ); + } + while( !m_testCaseTracker->isSuccessfullyCompleted() && !aborting() ); + } + // !TBD: deprecated - this will be replaced by indexed trackers + while( getCurrentContext().advanceGeneratorsForCurrentTest() && !aborting() ); + + Totals deltaTotals = m_totals.delta( prevTotals ); + if( testInfo.expectedToFail() && deltaTotals.testCases.passed > 0 ) { + deltaTotals.assertions.failed++; + deltaTotals.testCases.passed--; + deltaTotals.testCases.failed++; + } + m_totals.testCases += deltaTotals.testCases; + m_reporter->testCaseEnded( TestCaseStats( testInfo, + deltaTotals, + redirectedCout, + redirectedCerr, + aborting() ) ); + + m_activeTestCase = CATCH_NULL; + m_testCaseTracker = CATCH_NULL; + + return deltaTotals; + } + + Ptr config() const { + return m_config; + } + + private: // IResultCapture + + virtual void assertionEnded( AssertionResult const& result ) { + if( result.getResultType() == ResultWas::Ok ) { + m_totals.assertions.passed++; + } + else if( !result.isOk() ) { + m_totals.assertions.failed++; + } + + // We have no use for the return value (whether messages should be cleared), because messages were made 
scoped + // and should be let to clear themselves out. + static_cast(m_reporter->assertionEnded(AssertionStats(result, m_messages, m_totals))); + + // Reset working state + m_lastAssertionInfo = AssertionInfo( std::string(), m_lastAssertionInfo.lineInfo, "{Unknown expression after the reported line}" , m_lastAssertionInfo.resultDisposition ); + m_lastResult = result; + } + + virtual bool sectionStarted ( + SectionInfo const& sectionInfo, + Counts& assertions + ) + { + ITracker& sectionTracker = SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( sectionInfo.name, sectionInfo.lineInfo ) ); + if( !sectionTracker.isOpen() ) + return false; + m_activeSections.push_back( §ionTracker ); + + m_lastAssertionInfo.lineInfo = sectionInfo.lineInfo; + + m_reporter->sectionStarting( sectionInfo ); + + assertions = m_totals.assertions; + + return true; + } + bool testForMissingAssertions( Counts& assertions ) { + if( assertions.total() != 0 ) + return false; + if( !m_config->warnAboutMissingAssertions() ) + return false; + if( m_trackerContext.currentTracker().hasChildren() ) + return false; + m_totals.assertions.failed++; + assertions.failed++; + return true; + } + + virtual void sectionEnded( SectionEndInfo const& endInfo ) { + Counts assertions = m_totals.assertions - endInfo.prevAssertions; + bool missingAssertions = testForMissingAssertions( assertions ); + + if( !m_activeSections.empty() ) { + m_activeSections.back()->close(); + m_activeSections.pop_back(); + } + + m_reporter->sectionEnded( SectionStats( endInfo.sectionInfo, assertions, endInfo.durationInSeconds, missingAssertions ) ); + m_messages.clear(); + } + + virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) { + if( m_unfinishedSections.empty() ) + m_activeSections.back()->fail(); + else + m_activeSections.back()->close(); + m_activeSections.pop_back(); + + m_unfinishedSections.push_back( endInfo ); + } + + virtual void pushScopedMessage( MessageInfo const& message ) { + 
m_messages.push_back( message ); + } + + virtual void popScopedMessage( MessageInfo const& message ) { + m_messages.erase( std::remove( m_messages.begin(), m_messages.end(), message ), m_messages.end() ); + } + + virtual std::string getCurrentTestName() const { + return m_activeTestCase + ? m_activeTestCase->getTestCaseInfo().name + : std::string(); + } + + virtual const AssertionResult* getLastResult() const { + return &m_lastResult; + } + + virtual void exceptionEarlyReported() { + m_shouldReportUnexpected = false; + } + + virtual void handleFatalErrorCondition( std::string const& message ) { + // Don't rebuild the result -- the stringification itself can cause more fatal errors + // Instead, fake a result data. + AssertionResultData tempResult; + tempResult.resultType = ResultWas::FatalErrorCondition; + tempResult.message = message; + AssertionResult result(m_lastAssertionInfo, tempResult); + + getResultCapture().assertionEnded(result); + + handleUnfinishedSections(); + + // Recreate section for test case (as we will lose the one that was in scope) + TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); + SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); + + Counts assertions; + assertions.failed = 1; + SectionStats testCaseSectionStats( testCaseSection, assertions, 0, false ); + m_reporter->sectionEnded( testCaseSectionStats ); + + TestCaseInfo testInfo = m_activeTestCase->getTestCaseInfo(); + + Totals deltaTotals; + deltaTotals.testCases.failed = 1; + m_reporter->testCaseEnded( TestCaseStats( testInfo, + deltaTotals, + std::string(), + std::string(), + false ) ); + m_totals.testCases.failed++; + testGroupEnded( std::string(), m_totals, 1, 1 ); + m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, false ) ); + } + + public: + // !TBD We need to do this another way! 
+ bool aborting() const { + return m_totals.assertions.failed == static_cast( m_config->abortAfter() ); + } + + private: + + void runCurrentTest( std::string& redirectedCout, std::string& redirectedCerr ) { + TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); + SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); + m_reporter->sectionStarting( testCaseSection ); + Counts prevAssertions = m_totals.assertions; + double duration = 0; + m_shouldReportUnexpected = true; + try { + m_lastAssertionInfo = AssertionInfo( "TEST_CASE", testCaseInfo.lineInfo, std::string(), ResultDisposition::Normal ); + + seedRng( *m_config ); + + Timer timer; + timer.start(); + if( m_reporter->getPreferences().shouldRedirectStdOut ) { + StreamRedirect coutRedir( Catch::cout(), redirectedCout ); + StreamRedirect cerrRedir( Catch::cerr(), redirectedCerr ); + invokeActiveTestCase(); + } + else { + invokeActiveTestCase(); + } + duration = timer.getElapsedSeconds(); + } + catch( TestFailureException& ) { + // This just means the test was aborted due to failure + } + catch(...) { + // Under CATCH_CONFIG_FAST_COMPILE, unexpected exceptions under REQUIRE assertions + // are reported without translation at the point of origin. 
+ if (m_shouldReportUnexpected) { + makeUnexpectedResultBuilder().useActiveException(); + } + } + m_testCaseTracker->close(); + handleUnfinishedSections(); + m_messages.clear(); + + Counts assertions = m_totals.assertions - prevAssertions; + bool missingAssertions = testForMissingAssertions( assertions ); + + if( testCaseInfo.okToFail() ) { + std::swap( assertions.failedButOk, assertions.failed ); + m_totals.assertions.failed -= assertions.failedButOk; + m_totals.assertions.failedButOk += assertions.failedButOk; + } + + SectionStats testCaseSectionStats( testCaseSection, assertions, duration, missingAssertions ); + m_reporter->sectionEnded( testCaseSectionStats ); + } + + void invokeActiveTestCase() { + FatalConditionHandler fatalConditionHandler; // Handle signals + m_activeTestCase->invoke(); + fatalConditionHandler.reset(); + } + + private: + + ResultBuilder makeUnexpectedResultBuilder() const { + return ResultBuilder( m_lastAssertionInfo.macroName.c_str(), + m_lastAssertionInfo.lineInfo, + m_lastAssertionInfo.capturedExpression.c_str(), + m_lastAssertionInfo.resultDisposition ); + } + + void handleUnfinishedSections() { + // If sections ended prematurely due to an exception we stored their + // infos here so we can tear them down outside the unwind process. 
+ for( std::vector::const_reverse_iterator it = m_unfinishedSections.rbegin(), + itEnd = m_unfinishedSections.rend(); + it != itEnd; + ++it ) + sectionEnded( *it ); + m_unfinishedSections.clear(); + } + + TestRunInfo m_runInfo; + IMutableContext& m_context; + TestCase const* m_activeTestCase; + ITracker* m_testCaseTracker; + ITracker* m_currentSectionTracker; + AssertionResult m_lastResult; + + Ptr m_config; + Totals m_totals; + Ptr m_reporter; + std::vector m_messages; + AssertionInfo m_lastAssertionInfo; + std::vector m_unfinishedSections; + std::vector m_activeSections; + TrackerContext m_trackerContext; + bool m_shouldReportUnexpected; + }; + + IResultCapture& getResultCapture() { + if( IResultCapture* capture = getCurrentContext().getResultCapture() ) + return *capture; + else + throw std::logic_error( "No result capture instance" ); + } + +} // end namespace Catch + +// #included from: internal/catch_version.h +#define TWOBLUECUBES_CATCH_VERSION_H_INCLUDED + +namespace Catch { + + // Versioning information + struct Version { + Version( unsigned int _majorVersion, + unsigned int _minorVersion, + unsigned int _patchNumber, + char const * const _branchName, + unsigned int _buildNumber ); + + unsigned int const majorVersion; + unsigned int const minorVersion; + unsigned int const patchNumber; + + // buildNumber is only used if branchName is not null + char const * const branchName; + unsigned int const buildNumber; + + friend std::ostream& operator << ( std::ostream& os, Version const& version ); + + private: + void operator=( Version const& ); + }; + + inline Version libraryVersion(); +} + +#include +#include +#include + +namespace Catch { + + Ptr createReporter( std::string const& reporterName, Ptr const& config ) { + Ptr reporter = getRegistryHub().getReporterRegistry().create( reporterName, config.get() ); + if( !reporter ) { + std::ostringstream oss; + oss << "No reporter registered with name: '" << reporterName << "'"; + throw std::domain_error( oss.str() 
); + } + return reporter; + } + + Ptr makeReporter( Ptr const& config ) { + std::vector reporters = config->getReporterNames(); + if( reporters.empty() ) + reporters.push_back( "console" ); + + Ptr reporter; + for( std::vector::const_iterator it = reporters.begin(), itEnd = reporters.end(); + it != itEnd; + ++it ) + reporter = addReporter( reporter, createReporter( *it, config ) ); + return reporter; + } + Ptr addListeners( Ptr const& config, Ptr reporters ) { + IReporterRegistry::Listeners listeners = getRegistryHub().getReporterRegistry().getListeners(); + for( IReporterRegistry::Listeners::const_iterator it = listeners.begin(), itEnd = listeners.end(); + it != itEnd; + ++it ) + reporters = addReporter(reporters, (*it)->create( ReporterConfig( config ) ) ); + return reporters; + } + + Totals runTests( Ptr const& config ) { + + Ptr iconfig = config.get(); + + Ptr reporter = makeReporter( config ); + reporter = addListeners( iconfig, reporter ); + + RunContext context( iconfig, reporter ); + + Totals totals; + + context.testGroupStarting( config->name(), 1, 1 ); + + TestSpec testSpec = config->testSpec(); + if( !testSpec.hasFilters() ) + testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "~[.]" ).testSpec(); // All not hidden tests + + std::vector const& allTestCases = getAllTestCasesSorted( *iconfig ); + for( std::vector::const_iterator it = allTestCases.begin(), itEnd = allTestCases.end(); + it != itEnd; + ++it ) { + if( !context.aborting() && matchTest( *it, testSpec, *iconfig ) ) + totals += context.runTest( *it ); + else + reporter->skipTest( *it ); + } + + context.testGroupEnded( iconfig->name(), totals, 1, 1 ); + return totals; + } + + void applyFilenamesAsTags( IConfig const& config ) { + std::vector const& tests = getAllTestCasesSorted( config ); + for(std::size_t i = 0; i < tests.size(); ++i ) { + TestCase& test = const_cast( tests[i] ); + std::set tags = test.tags; + + std::string filename = test.lineInfo.file; + std::string::size_type 
lastSlash = filename.find_last_of( "\\/" ); + if( lastSlash != std::string::npos ) + filename = filename.substr( lastSlash+1 ); + + std::string::size_type lastDot = filename.find_last_of( "." ); + if( lastDot != std::string::npos ) + filename = filename.substr( 0, lastDot ); + + tags.insert( "#" + filename ); + setTags( test, tags ); + } + } + + class Session : NonCopyable { + static bool alreadyInstantiated; + + public: + + struct OnUnusedOptions { enum DoWhat { Ignore, Fail }; }; + + Session() + : m_cli( makeCommandLineParser() ) { + if( alreadyInstantiated ) { + std::string msg = "Only one instance of Catch::Session can ever be used"; + Catch::cerr() << msg << std::endl; + throw std::logic_error( msg ); + } + alreadyInstantiated = true; + } + ~Session() { + Catch::cleanUp(); + } + + void showHelp( std::string const& processName ) { + Catch::cout() << "\nCatch v" << libraryVersion() << "\n"; + + m_cli.usage( Catch::cout(), processName ); + Catch::cout() << "For more detail usage please see the project docs\n" << std::endl; + } + + int applyCommandLine( int argc, char const* const* const argv, OnUnusedOptions::DoWhat unusedOptionBehaviour = OnUnusedOptions::Fail ) { + try { + m_cli.setThrowOnUnrecognisedTokens( unusedOptionBehaviour == OnUnusedOptions::Fail ); + m_unusedTokens = m_cli.parseInto( Clara::argsToVector( argc, argv ), m_configData ); + if( m_configData.showHelp ) + showHelp( m_configData.processName ); + m_config.reset(); + } + catch( std::exception& ex ) { + { + Colour colourGuard( Colour::Red ); + Catch::cerr() + << "\nError(s) in input:\n" + << Text( ex.what(), TextAttributes().setIndent(2) ) + << "\n\n"; + } + m_cli.usage( Catch::cout(), m_configData.processName ); + return (std::numeric_limits::max)(); + } + return 0; + } + + void useConfigData( ConfigData const& _configData ) { + m_configData = _configData; + m_config.reset(); + } + + int run( int argc, char const* const* const argv ) { + + int returnCode = applyCommandLine( argc, argv ); + if( 
returnCode == 0 ) + returnCode = run(); + return returnCode; + } + + #if defined(WIN32) && defined(UNICODE) + int run( int argc, wchar_t const* const* const argv ) { + + char **utf8Argv = new char *[ argc ]; + + for ( int i = 0; i < argc; ++i ) { + int bufSize = WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, NULL, 0, NULL, NULL ); + + utf8Argv[ i ] = new char[ bufSize ]; + + WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, utf8Argv[i], bufSize, NULL, NULL ); + } + + int returnCode = applyCommandLine( argc, utf8Argv ); + if( returnCode == 0 ) + returnCode = run(); + + for ( int i = 0; i < argc; ++i ) + delete [] utf8Argv[ i ]; + + delete [] utf8Argv; + + return returnCode; + } + #endif + + int run() { + if( m_configData.showHelp ) + return 0; + + try + { + config(); // Force config to be constructed + + seedRng( *m_config ); + + if( m_configData.filenamesAsTags ) + applyFilenamesAsTags( *m_config ); + + // Handle list request + if( Option listed = list( config() ) ) + return static_cast( *listed ); + + return static_cast( runTests( m_config ).assertions.failed ); + } + catch( std::exception& ex ) { + Catch::cerr() << ex.what() << std::endl; + return (std::numeric_limits::max)(); + } + } + + Clara::CommandLine const& cli() const { + return m_cli; + } + std::vector const& unusedTokens() const { + return m_unusedTokens; + } + ConfigData& configData() { + return m_configData; + } + Config& config() { + if( !m_config ) + m_config = new Config( m_configData ); + return *m_config; + } + private: + Clara::CommandLine m_cli; + std::vector m_unusedTokens; + ConfigData m_configData; + Ptr m_config; + }; + + bool Session::alreadyInstantiated = false; + +} // end namespace Catch + +// #included from: catch_registry_hub.hpp +#define TWOBLUECUBES_CATCH_REGISTRY_HUB_HPP_INCLUDED + +// #included from: catch_test_case_registry_impl.hpp +#define TWOBLUECUBES_CATCH_TEST_CASE_REGISTRY_IMPL_HPP_INCLUDED + +#include +#include +#include +#include + +namespace Catch { + + struct 
RandomNumberGenerator { + typedef std::ptrdiff_t result_type; + + result_type operator()( result_type n ) const { return std::rand() % n; } + +#ifdef CATCH_CONFIG_CPP11_SHUFFLE + static constexpr result_type min() { return 0; } + static constexpr result_type max() { return 1000000; } + result_type operator()() const { return std::rand() % max(); } +#endif + template + static void shuffle( V& vector ) { + RandomNumberGenerator rng; +#ifdef CATCH_CONFIG_CPP11_SHUFFLE + std::shuffle( vector.begin(), vector.end(), rng ); +#else + std::random_shuffle( vector.begin(), vector.end(), rng ); +#endif + } + }; + + inline std::vector sortTests( IConfig const& config, std::vector const& unsortedTestCases ) { + + std::vector sorted = unsortedTestCases; + + switch( config.runOrder() ) { + case RunTests::InLexicographicalOrder: + std::sort( sorted.begin(), sorted.end() ); + break; + case RunTests::InRandomOrder: + { + seedRng( config ); + RandomNumberGenerator::shuffle( sorted ); + } + break; + case RunTests::InDeclarationOrder: + // already in declaration order + break; + } + return sorted; + } + bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ) { + return testSpec.matches( testCase ) && ( config.allowThrows() || !testCase.throws() ); + } + + void enforceNoDuplicateTestCases( std::vector const& functions ) { + std::set seenFunctions; + for( std::vector::const_iterator it = functions.begin(), itEnd = functions.end(); + it != itEnd; + ++it ) { + std::pair::const_iterator, bool> prev = seenFunctions.insert( *it ); + if( !prev.second ) { + std::ostringstream ss; + + ss << Colour( Colour::Red ) + << "error: TEST_CASE( \"" << it->name << "\" ) already defined.\n" + << "\tFirst seen at " << prev.first->getTestCaseInfo().lineInfo << '\n' + << "\tRedefined at " << it->getTestCaseInfo().lineInfo << std::endl; + + throw std::runtime_error(ss.str()); + } + } + } + + std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, 
IConfig const& config ) { + std::vector filtered; + filtered.reserve( testCases.size() ); + for( std::vector::const_iterator it = testCases.begin(), itEnd = testCases.end(); + it != itEnd; + ++it ) + if( matchTest( *it, testSpec, config ) ) + filtered.push_back( *it ); + return filtered; + } + std::vector const& getAllTestCasesSorted( IConfig const& config ) { + return getRegistryHub().getTestCaseRegistry().getAllTestsSorted( config ); + } + + class TestRegistry : public ITestCaseRegistry { + public: + TestRegistry() + : m_currentSortOrder( RunTests::InDeclarationOrder ), + m_unnamedCount( 0 ) + {} + virtual ~TestRegistry(); + + virtual void registerTest( TestCase const& testCase ) { + std::string name = testCase.getTestCaseInfo().name; + if( name.empty() ) { + std::ostringstream oss; + oss << "Anonymous test case " << ++m_unnamedCount; + return registerTest( testCase.withName( oss.str() ) ); + } + m_functions.push_back( testCase ); + } + + virtual std::vector const& getAllTests() const { + return m_functions; + } + virtual std::vector const& getAllTestsSorted( IConfig const& config ) const { + if( m_sortedFunctions.empty() ) + enforceNoDuplicateTestCases( m_functions ); + + if( m_currentSortOrder != config.runOrder() || m_sortedFunctions.empty() ) { + m_sortedFunctions = sortTests( config, m_functions ); + m_currentSortOrder = config.runOrder(); + } + return m_sortedFunctions; + } + + private: + std::vector m_functions; + mutable RunTests::InWhatOrder m_currentSortOrder; + mutable std::vector m_sortedFunctions; + size_t m_unnamedCount; + std::ios_base::Init m_ostreamInit; // Forces cout/ cerr to be initialised + }; + + /////////////////////////////////////////////////////////////////////////// + + class FreeFunctionTestCase : public SharedImpl { + public: + + FreeFunctionTestCase( TestFunction fun ) : m_fun( fun ) {} + + virtual void invoke() const { + m_fun(); + } + + private: + virtual ~FreeFunctionTestCase(); + + TestFunction m_fun; + }; + + inline std::string 
extractClassName( std::string const& classOrQualifiedMethodName ) { + std::string className = classOrQualifiedMethodName; + if( startsWith( className, '&' ) ) + { + std::size_t lastColons = className.rfind( "::" ); + std::size_t penultimateColons = className.rfind( "::", lastColons-1 ); + if( penultimateColons == std::string::npos ) + penultimateColons = 1; + className = className.substr( penultimateColons, lastColons-penultimateColons ); + } + return className; + } + + void registerTestCase + ( ITestCase* testCase, + char const* classOrQualifiedMethodName, + NameAndDesc const& nameAndDesc, + SourceLineInfo const& lineInfo ) { + + getMutableRegistryHub().registerTest + ( makeTestCase + ( testCase, + extractClassName( classOrQualifiedMethodName ), + nameAndDesc.name, + nameAndDesc.description, + lineInfo ) ); + } + void registerTestCaseFunction + ( TestFunction function, + SourceLineInfo const& lineInfo, + NameAndDesc const& nameAndDesc ) { + registerTestCase( new FreeFunctionTestCase( function ), "", nameAndDesc, lineInfo ); + } + + /////////////////////////////////////////////////////////////////////////// + + AutoReg::AutoReg + ( TestFunction function, + SourceLineInfo const& lineInfo, + NameAndDesc const& nameAndDesc ) { + registerTestCaseFunction( function, lineInfo, nameAndDesc ); + } + + AutoReg::~AutoReg() {} + +} // end namespace Catch + +// #included from: catch_reporter_registry.hpp +#define TWOBLUECUBES_CATCH_REPORTER_REGISTRY_HPP_INCLUDED + +#include + +namespace Catch { + + class ReporterRegistry : public IReporterRegistry { + + public: + + virtual ~ReporterRegistry() CATCH_OVERRIDE {} + + virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const CATCH_OVERRIDE { + FactoryMap::const_iterator it = m_factories.find( name ); + if( it == m_factories.end() ) + return CATCH_NULL; + return it->second->create( ReporterConfig( config ) ); + } + + void registerReporter( std::string const& name, Ptr const& factory ) { + 
m_factories.insert( std::make_pair( name, factory ) ); + } + void registerListener( Ptr const& factory ) { + m_listeners.push_back( factory ); + } + + virtual FactoryMap const& getFactories() const CATCH_OVERRIDE { + return m_factories; + } + virtual Listeners const& getListeners() const CATCH_OVERRIDE { + return m_listeners; + } + + private: + FactoryMap m_factories; + Listeners m_listeners; + }; +} + +// #included from: catch_exception_translator_registry.hpp +#define TWOBLUECUBES_CATCH_EXCEPTION_TRANSLATOR_REGISTRY_HPP_INCLUDED + +#ifdef __OBJC__ +#import "Foundation/Foundation.h" +#endif + +namespace Catch { + + class ExceptionTranslatorRegistry : public IExceptionTranslatorRegistry { + public: + ~ExceptionTranslatorRegistry() { + deleteAll( m_translators ); + } + + virtual void registerTranslator( const IExceptionTranslator* translator ) { + m_translators.push_back( translator ); + } + + virtual std::string translateActiveException() const { + try { +#ifdef __OBJC__ + // In Objective-C try objective-c exceptions first + @try { + return tryTranslators(); + } + @catch (NSException *exception) { + return Catch::toString( [exception description] ); + } +#else + return tryTranslators(); +#endif + } + catch( TestFailureException& ) { + throw; + } + catch( std::exception& ex ) { + return ex.what(); + } + catch( std::string& msg ) { + return msg; + } + catch( const char* msg ) { + return msg; + } + catch(...) 
{ + return "Unknown exception"; + } + } + + std::string tryTranslators() const { + if( m_translators.empty() ) + throw; + else + return m_translators[0]->translate( m_translators.begin()+1, m_translators.end() ); + } + + private: + std::vector m_translators; + }; +} + +// #included from: catch_tag_alias_registry.h +#define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_H_INCLUDED + +#include + +namespace Catch { + + class TagAliasRegistry : public ITagAliasRegistry { + public: + virtual ~TagAliasRegistry(); + virtual Option find( std::string const& alias ) const; + virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const; + void add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ); + + private: + std::map m_registry; + }; + +} // end namespace Catch + +namespace Catch { + + namespace { + + class RegistryHub : public IRegistryHub, public IMutableRegistryHub { + + RegistryHub( RegistryHub const& ); + void operator=( RegistryHub const& ); + + public: // IRegistryHub + RegistryHub() { + } + virtual IReporterRegistry const& getReporterRegistry() const CATCH_OVERRIDE { + return m_reporterRegistry; + } + virtual ITestCaseRegistry const& getTestCaseRegistry() const CATCH_OVERRIDE { + return m_testCaseRegistry; + } + virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() CATCH_OVERRIDE { + return m_exceptionTranslatorRegistry; + } + virtual ITagAliasRegistry const& getTagAliasRegistry() const CATCH_OVERRIDE { + return m_tagAliasRegistry; + } + + public: // IMutableRegistryHub + virtual void registerReporter( std::string const& name, Ptr const& factory ) CATCH_OVERRIDE { + m_reporterRegistry.registerReporter( name, factory ); + } + virtual void registerListener( Ptr const& factory ) CATCH_OVERRIDE { + m_reporterRegistry.registerListener( factory ); + } + virtual void registerTest( TestCase const& testInfo ) CATCH_OVERRIDE { + m_testCaseRegistry.registerTest( testInfo ); + } + virtual void registerTranslator( 
const IExceptionTranslator* translator ) CATCH_OVERRIDE { + m_exceptionTranslatorRegistry.registerTranslator( translator ); + } + virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) CATCH_OVERRIDE { + m_tagAliasRegistry.add( alias, tag, lineInfo ); + } + + private: + TestRegistry m_testCaseRegistry; + ReporterRegistry m_reporterRegistry; + ExceptionTranslatorRegistry m_exceptionTranslatorRegistry; + TagAliasRegistry m_tagAliasRegistry; + }; + + // Single, global, instance + inline RegistryHub*& getTheRegistryHub() { + static RegistryHub* theRegistryHub = CATCH_NULL; + if( !theRegistryHub ) + theRegistryHub = new RegistryHub(); + return theRegistryHub; + } + } + + IRegistryHub& getRegistryHub() { + return *getTheRegistryHub(); + } + IMutableRegistryHub& getMutableRegistryHub() { + return *getTheRegistryHub(); + } + void cleanUp() { + delete getTheRegistryHub(); + getTheRegistryHub() = CATCH_NULL; + cleanUpContext(); + } + std::string translateActiveException() { + return getRegistryHub().getExceptionTranslatorRegistry().translateActiveException(); + } + +} // end namespace Catch + +// #included from: catch_notimplemented_exception.hpp +#define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_HPP_INCLUDED + +#include + +namespace Catch { + + NotImplementedException::NotImplementedException( SourceLineInfo const& lineInfo ) + : m_lineInfo( lineInfo ) { + std::ostringstream oss; + oss << lineInfo << ": function "; + oss << "not implemented"; + m_what = oss.str(); + } + + const char* NotImplementedException::what() const CATCH_NOEXCEPT { + return m_what.c_str(); + } + +} // end namespace Catch + +// #included from: catch_context_impl.hpp +#define TWOBLUECUBES_CATCH_CONTEXT_IMPL_HPP_INCLUDED + +// #included from: catch_stream.hpp +#define TWOBLUECUBES_CATCH_STREAM_HPP_INCLUDED + +#include +#include +#include + +namespace Catch { + + template + class StreamBufImpl : public StreamBufBase { + char data[bufferSize]; + 
WriterF m_writer; + + public: + StreamBufImpl() { + setp( data, data + sizeof(data) ); + } + + ~StreamBufImpl() CATCH_NOEXCEPT { + sync(); + } + + private: + int overflow( int c ) { + sync(); + + if( c != EOF ) { + if( pbase() == epptr() ) + m_writer( std::string( 1, static_cast( c ) ) ); + else + sputc( static_cast( c ) ); + } + return 0; + } + + int sync() { + if( pbase() != pptr() ) { + m_writer( std::string( pbase(), static_cast( pptr() - pbase() ) ) ); + setp( pbase(), epptr() ); + } + return 0; + } + }; + + /////////////////////////////////////////////////////////////////////////// + + FileStream::FileStream( std::string const& filename ) { + m_ofs.open( filename.c_str() ); + if( m_ofs.fail() ) { + std::ostringstream oss; + oss << "Unable to open file: '" << filename << '\''; + throw std::domain_error( oss.str() ); + } + } + + std::ostream& FileStream::stream() const { + return m_ofs; + } + + struct OutputDebugWriter { + + void operator()( std::string const&str ) { + writeToDebugConsole( str ); + } + }; + + DebugOutStream::DebugOutStream() + : m_streamBuf( new StreamBufImpl() ), + m_os( m_streamBuf.get() ) + {} + + std::ostream& DebugOutStream::stream() const { + return m_os; + } + + // Store the streambuf from cout up-front because + // cout may get redirected when running tests + CoutStream::CoutStream() + : m_os( Catch::cout().rdbuf() ) + {} + + std::ostream& CoutStream::stream() const { + return m_os; + } + +#ifndef CATCH_CONFIG_NOSTDOUT // If you #define this you must implement these functions + std::ostream& cout() { + return std::cout; + } + std::ostream& cerr() { + return std::cerr; + } +#endif +} + +namespace Catch { + + class Context : public IMutableContext { + + Context() : m_config( CATCH_NULL ), m_runner( CATCH_NULL ), m_resultCapture( CATCH_NULL ) {} + Context( Context const& ); + void operator=( Context const& ); + + public: + virtual ~Context() { + deleteAllValues( m_generatorsByTestName ); + } + + public: // IContext + virtual 
IResultCapture* getResultCapture() { + return m_resultCapture; + } + virtual IRunner* getRunner() { + return m_runner; + } + virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) { + return getGeneratorsForCurrentTest() + .getGeneratorInfo( fileInfo, totalSize ) + .getCurrentIndex(); + } + virtual bool advanceGeneratorsForCurrentTest() { + IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); + return generators && generators->moveNext(); + } + + virtual Ptr getConfig() const { + return m_config; + } + + public: // IMutableContext + virtual void setResultCapture( IResultCapture* resultCapture ) { + m_resultCapture = resultCapture; + } + virtual void setRunner( IRunner* runner ) { + m_runner = runner; + } + virtual void setConfig( Ptr const& config ) { + m_config = config; + } + + friend IMutableContext& getCurrentMutableContext(); + + private: + IGeneratorsForTest* findGeneratorsForCurrentTest() { + std::string testName = getResultCapture()->getCurrentTestName(); + + std::map::const_iterator it = + m_generatorsByTestName.find( testName ); + return it != m_generatorsByTestName.end() + ? 
it->second + : CATCH_NULL; + } + + IGeneratorsForTest& getGeneratorsForCurrentTest() { + IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); + if( !generators ) { + std::string testName = getResultCapture()->getCurrentTestName(); + generators = createGeneratorsForTest(); + m_generatorsByTestName.insert( std::make_pair( testName, generators ) ); + } + return *generators; + } + + private: + Ptr m_config; + IRunner* m_runner; + IResultCapture* m_resultCapture; + std::map m_generatorsByTestName; + }; + + namespace { + Context* currentContext = CATCH_NULL; + } + IMutableContext& getCurrentMutableContext() { + if( !currentContext ) + currentContext = new Context(); + return *currentContext; + } + IContext& getCurrentContext() { + return getCurrentMutableContext(); + } + + void cleanUpContext() { + delete currentContext; + currentContext = CATCH_NULL; + } +} + +// #included from: catch_console_colour_impl.hpp +#define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_IMPL_HPP_INCLUDED + +// #included from: catch_errno_guard.hpp +#define TWOBLUECUBES_CATCH_ERRNO_GUARD_HPP_INCLUDED + +#include + +namespace Catch { + + class ErrnoGuard { + public: + ErrnoGuard():m_oldErrno(errno){} + ~ErrnoGuard() { errno = m_oldErrno; } + private: + int m_oldErrno; + }; + +} + +namespace Catch { + namespace { + + struct IColourImpl { + virtual ~IColourImpl() {} + virtual void use( Colour::Code _colourCode ) = 0; + }; + + struct NoColourImpl : IColourImpl { + void use( Colour::Code ) {} + + static IColourImpl* instance() { + static NoColourImpl s_instance; + return &s_instance; + } + }; + + } // anon namespace +} // namespace Catch + +#if !defined( CATCH_CONFIG_COLOUR_NONE ) && !defined( CATCH_CONFIG_COLOUR_WINDOWS ) && !defined( CATCH_CONFIG_COLOUR_ANSI ) +# ifdef CATCH_PLATFORM_WINDOWS +# define CATCH_CONFIG_COLOUR_WINDOWS +# else +# define CATCH_CONFIG_COLOUR_ANSI +# endif +#endif + +#if defined ( CATCH_CONFIG_COLOUR_WINDOWS ) ///////////////////////////////////////// + +namespace Catch { 
+namespace { + + class Win32ColourImpl : public IColourImpl { + public: + Win32ColourImpl() : stdoutHandle( GetStdHandle(STD_OUTPUT_HANDLE) ) + { + CONSOLE_SCREEN_BUFFER_INFO csbiInfo; + GetConsoleScreenBufferInfo( stdoutHandle, &csbiInfo ); + originalForegroundAttributes = csbiInfo.wAttributes & ~( BACKGROUND_GREEN | BACKGROUND_RED | BACKGROUND_BLUE | BACKGROUND_INTENSITY ); + originalBackgroundAttributes = csbiInfo.wAttributes & ~( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY ); + } + + virtual void use( Colour::Code _colourCode ) { + switch( _colourCode ) { + case Colour::None: return setTextAttribute( originalForegroundAttributes ); + case Colour::White: return setTextAttribute( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); + case Colour::Red: return setTextAttribute( FOREGROUND_RED ); + case Colour::Green: return setTextAttribute( FOREGROUND_GREEN ); + case Colour::Blue: return setTextAttribute( FOREGROUND_BLUE ); + case Colour::Cyan: return setTextAttribute( FOREGROUND_BLUE | FOREGROUND_GREEN ); + case Colour::Yellow: return setTextAttribute( FOREGROUND_RED | FOREGROUND_GREEN ); + case Colour::Grey: return setTextAttribute( 0 ); + + case Colour::LightGrey: return setTextAttribute( FOREGROUND_INTENSITY ); + case Colour::BrightRed: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_RED ); + case Colour::BrightGreen: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN ); + case Colour::BrightWhite: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); + + case Colour::Bright: throw std::logic_error( "not a colour" ); + } + } + + private: + void setTextAttribute( WORD _textAttribute ) { + SetConsoleTextAttribute( stdoutHandle, _textAttribute | originalBackgroundAttributes ); + } + HANDLE stdoutHandle; + WORD originalForegroundAttributes; + WORD originalBackgroundAttributes; + }; + + IColourImpl* platformColourInstance() { + static Win32ColourImpl 
s_instance; + + Ptr config = getCurrentContext().getConfig(); + UseColour::YesOrNo colourMode = config + ? config->useColour() + : UseColour::Auto; + if( colourMode == UseColour::Auto ) + colourMode = !isDebuggerActive() + ? UseColour::Yes + : UseColour::No; + return colourMode == UseColour::Yes + ? &s_instance + : NoColourImpl::instance(); + } + +} // end anon namespace +} // end namespace Catch + +#elif defined( CATCH_CONFIG_COLOUR_ANSI ) ////////////////////////////////////// + +#include + +namespace Catch { +namespace { + + // use POSIX/ ANSI console terminal codes + // Thanks to Adam Strzelecki for original contribution + // (http://github.com/nanoant) + // https://github.com/philsquared/Catch/pull/131 + class PosixColourImpl : public IColourImpl { + public: + virtual void use( Colour::Code _colourCode ) { + switch( _colourCode ) { + case Colour::None: + case Colour::White: return setColour( "[0m" ); + case Colour::Red: return setColour( "[0;31m" ); + case Colour::Green: return setColour( "[0;32m" ); + case Colour::Blue: return setColour( "[0;34m" ); + case Colour::Cyan: return setColour( "[0;36m" ); + case Colour::Yellow: return setColour( "[0;33m" ); + case Colour::Grey: return setColour( "[1;30m" ); + + case Colour::LightGrey: return setColour( "[0;37m" ); + case Colour::BrightRed: return setColour( "[1;31m" ); + case Colour::BrightGreen: return setColour( "[1;32m" ); + case Colour::BrightWhite: return setColour( "[1;37m" ); + + case Colour::Bright: throw std::logic_error( "not a colour" ); + } + } + static IColourImpl* instance() { + static PosixColourImpl s_instance; + return &s_instance; + } + + private: + void setColour( const char* _escapeCode ) { + Catch::cout() << '\033' << _escapeCode; + } + }; + + IColourImpl* platformColourInstance() { + ErrnoGuard guard; + Ptr config = getCurrentContext().getConfig(); + UseColour::YesOrNo colourMode = config + ? 
config->useColour() + : UseColour::Auto; + if( colourMode == UseColour::Auto ) + colourMode = (!isDebuggerActive() && isatty(STDOUT_FILENO) ) + ? UseColour::Yes + : UseColour::No; + return colourMode == UseColour::Yes + ? PosixColourImpl::instance() + : NoColourImpl::instance(); + } + +} // end anon namespace +} // end namespace Catch + +#else // not Windows or ANSI /////////////////////////////////////////////// + +namespace Catch { + + static IColourImpl* platformColourInstance() { return NoColourImpl::instance(); } + +} // end namespace Catch + +#endif // Windows/ ANSI/ None + +namespace Catch { + + Colour::Colour( Code _colourCode ) : m_moved( false ) { use( _colourCode ); } + Colour::Colour( Colour const& _other ) : m_moved( false ) { const_cast( _other ).m_moved = true; } + Colour::~Colour(){ if( !m_moved ) use( None ); } + + void Colour::use( Code _colourCode ) { + static IColourImpl* impl = platformColourInstance(); + impl->use( _colourCode ); + } + +} // end namespace Catch + +// #included from: catch_generators_impl.hpp +#define TWOBLUECUBES_CATCH_GENERATORS_IMPL_HPP_INCLUDED + +#include +#include +#include + +namespace Catch { + + struct GeneratorInfo : IGeneratorInfo { + + GeneratorInfo( std::size_t size ) + : m_size( size ), + m_currentIndex( 0 ) + {} + + bool moveNext() { + if( ++m_currentIndex == m_size ) { + m_currentIndex = 0; + return false; + } + return true; + } + + std::size_t getCurrentIndex() const { + return m_currentIndex; + } + + std::size_t m_size; + std::size_t m_currentIndex; + }; + + /////////////////////////////////////////////////////////////////////////// + + class GeneratorsForTest : public IGeneratorsForTest { + + public: + ~GeneratorsForTest() { + deleteAll( m_generatorsInOrder ); + } + + IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) { + std::map::const_iterator it = m_generatorsByName.find( fileInfo ); + if( it == m_generatorsByName.end() ) { + IGeneratorInfo* info = new GeneratorInfo( size ); 
+ m_generatorsByName.insert( std::make_pair( fileInfo, info ) ); + m_generatorsInOrder.push_back( info ); + return *info; + } + return *it->second; + } + + bool moveNext() { + std::vector::const_iterator it = m_generatorsInOrder.begin(); + std::vector::const_iterator itEnd = m_generatorsInOrder.end(); + for(; it != itEnd; ++it ) { + if( (*it)->moveNext() ) + return true; + } + return false; + } + + private: + std::map m_generatorsByName; + std::vector m_generatorsInOrder; + }; + + IGeneratorsForTest* createGeneratorsForTest() + { + return new GeneratorsForTest(); + } + +} // end namespace Catch + +// #included from: catch_assertionresult.hpp +#define TWOBLUECUBES_CATCH_ASSERTIONRESULT_HPP_INCLUDED + +namespace Catch { + + AssertionInfo::AssertionInfo( std::string const& _macroName, + SourceLineInfo const& _lineInfo, + std::string const& _capturedExpression, + ResultDisposition::Flags _resultDisposition ) + : macroName( _macroName ), + lineInfo( _lineInfo ), + capturedExpression( _capturedExpression ), + resultDisposition( _resultDisposition ) + {} + + AssertionResult::AssertionResult() {} + + AssertionResult::AssertionResult( AssertionInfo const& info, AssertionResultData const& data ) + : m_info( info ), + m_resultData( data ) + {} + + AssertionResult::~AssertionResult() {} + + // Result was a success + bool AssertionResult::succeeded() const { + return Catch::isOk( m_resultData.resultType ); + } + + // Result was a success, or failure is suppressed + bool AssertionResult::isOk() const { + return Catch::isOk( m_resultData.resultType ) || shouldSuppressFailure( m_info.resultDisposition ); + } + + ResultWas::OfType AssertionResult::getResultType() const { + return m_resultData.resultType; + } + + bool AssertionResult::hasExpression() const { + return !m_info.capturedExpression.empty(); + } + + bool AssertionResult::hasMessage() const { + return !m_resultData.message.empty(); + } + + std::string AssertionResult::getExpression() const { + if( isFalseTest( 
m_info.resultDisposition ) ) + return '!' + m_info.capturedExpression; + else + return m_info.capturedExpression; + } + std::string AssertionResult::getExpressionInMacro() const { + if( m_info.macroName.empty() ) + return m_info.capturedExpression; + else + return m_info.macroName + "( " + m_info.capturedExpression + " )"; + } + + bool AssertionResult::hasExpandedExpression() const { + return hasExpression() && getExpandedExpression() != getExpression(); + } + + std::string AssertionResult::getExpandedExpression() const { + return m_resultData.reconstructExpression(); + } + + std::string AssertionResult::getMessage() const { + return m_resultData.message; + } + SourceLineInfo AssertionResult::getSourceInfo() const { + return m_info.lineInfo; + } + + std::string AssertionResult::getTestMacroName() const { + return m_info.macroName; + } + + void AssertionResult::discardDecomposedExpression() const { + m_resultData.decomposedExpression = CATCH_NULL; + } + + void AssertionResult::expandDecomposedExpression() const { + m_resultData.reconstructExpression(); + } + +} // end namespace Catch + +// #included from: catch_test_case_info.hpp +#define TWOBLUECUBES_CATCH_TEST_CASE_INFO_HPP_INCLUDED + +#include + +namespace Catch { + + inline TestCaseInfo::SpecialProperties parseSpecialTag( std::string const& tag ) { + if( startsWith( tag, '.' 
) || + tag == "hide" || + tag == "!hide" ) + return TestCaseInfo::IsHidden; + else if( tag == "!throws" ) + return TestCaseInfo::Throws; + else if( tag == "!shouldfail" ) + return TestCaseInfo::ShouldFail; + else if( tag == "!mayfail" ) + return TestCaseInfo::MayFail; + else if( tag == "!nonportable" ) + return TestCaseInfo::NonPortable; + else + return TestCaseInfo::None; + } + inline bool isReservedTag( std::string const& tag ) { + return parseSpecialTag( tag ) == TestCaseInfo::None && tag.size() > 0 && !std::isalnum( tag[0] ); + } + inline void enforceNotReservedTag( std::string const& tag, SourceLineInfo const& _lineInfo ) { + if( isReservedTag( tag ) ) { + std::ostringstream ss; + ss << Colour(Colour::Red) + << "Tag name [" << tag << "] not allowed.\n" + << "Tag names starting with non alpha-numeric characters are reserved\n" + << Colour(Colour::FileName) + << _lineInfo << '\n'; + throw std::runtime_error(ss.str()); + } + } + + TestCase makeTestCase( ITestCase* _testCase, + std::string const& _className, + std::string const& _name, + std::string const& _descOrTags, + SourceLineInfo const& _lineInfo ) + { + bool isHidden( startsWith( _name, "./" ) ); // Legacy support + + // Parse out tags + std::set tags; + std::string desc, tag; + bool inTag = false; + for( std::size_t i = 0; i < _descOrTags.size(); ++i ) { + char c = _descOrTags[i]; + if( !inTag ) { + if( c == '[' ) + inTag = true; + else + desc += c; + } + else { + if( c == ']' ) { + TestCaseInfo::SpecialProperties prop = parseSpecialTag( tag ); + if( prop == TestCaseInfo::IsHidden ) + isHidden = true; + else if( prop == TestCaseInfo::None ) + enforceNotReservedTag( tag, _lineInfo ); + + tags.insert( tag ); + tag.clear(); + inTag = false; + } + else + tag += c; + } + } + if( isHidden ) { + tags.insert( "hide" ); + tags.insert( "." 
); + } + + TestCaseInfo info( _name, _className, desc, tags, _lineInfo ); + return TestCase( _testCase, info ); + } + + void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ) + { + testCaseInfo.tags = tags; + testCaseInfo.lcaseTags.clear(); + + std::ostringstream oss; + for( std::set::const_iterator it = tags.begin(), itEnd = tags.end(); it != itEnd; ++it ) { + oss << '[' << *it << ']'; + std::string lcaseTag = toLower( *it ); + testCaseInfo.properties = static_cast( testCaseInfo.properties | parseSpecialTag( lcaseTag ) ); + testCaseInfo.lcaseTags.insert( lcaseTag ); + } + testCaseInfo.tagsAsString = oss.str(); + } + + TestCaseInfo::TestCaseInfo( std::string const& _name, + std::string const& _className, + std::string const& _description, + std::set const& _tags, + SourceLineInfo const& _lineInfo ) + : name( _name ), + className( _className ), + description( _description ), + lineInfo( _lineInfo ), + properties( None ) + { + setTags( *this, _tags ); + } + + TestCaseInfo::TestCaseInfo( TestCaseInfo const& other ) + : name( other.name ), + className( other.className ), + description( other.description ), + tags( other.tags ), + lcaseTags( other.lcaseTags ), + tagsAsString( other.tagsAsString ), + lineInfo( other.lineInfo ), + properties( other.properties ) + {} + + bool TestCaseInfo::isHidden() const { + return ( properties & IsHidden ) != 0; + } + bool TestCaseInfo::throws() const { + return ( properties & Throws ) != 0; + } + bool TestCaseInfo::okToFail() const { + return ( properties & (ShouldFail | MayFail ) ) != 0; + } + bool TestCaseInfo::expectedToFail() const { + return ( properties & (ShouldFail ) ) != 0; + } + + TestCase::TestCase( ITestCase* testCase, TestCaseInfo const& info ) : TestCaseInfo( info ), test( testCase ) {} + + TestCase::TestCase( TestCase const& other ) + : TestCaseInfo( other ), + test( other.test ) + {} + + TestCase TestCase::withName( std::string const& _newName ) const { + TestCase other( *this ); + other.name = _newName; + 
return other; + } + + void TestCase::swap( TestCase& other ) { + test.swap( other.test ); + name.swap( other.name ); + className.swap( other.className ); + description.swap( other.description ); + tags.swap( other.tags ); + lcaseTags.swap( other.lcaseTags ); + tagsAsString.swap( other.tagsAsString ); + std::swap( TestCaseInfo::properties, static_cast( other ).properties ); + std::swap( lineInfo, other.lineInfo ); + } + + void TestCase::invoke() const { + test->invoke(); + } + + bool TestCase::operator == ( TestCase const& other ) const { + return test.get() == other.test.get() && + name == other.name && + className == other.className; + } + + bool TestCase::operator < ( TestCase const& other ) const { + return name < other.name; + } + TestCase& TestCase::operator = ( TestCase const& other ) { + TestCase temp( other ); + swap( temp ); + return *this; + } + + TestCaseInfo const& TestCase::getTestCaseInfo() const + { + return *this; + } + +} // end namespace Catch + +// #included from: catch_version.hpp +#define TWOBLUECUBES_CATCH_VERSION_HPP_INCLUDED + +namespace Catch { + + Version::Version + ( unsigned int _majorVersion, + unsigned int _minorVersion, + unsigned int _patchNumber, + char const * const _branchName, + unsigned int _buildNumber ) + : majorVersion( _majorVersion ), + minorVersion( _minorVersion ), + patchNumber( _patchNumber ), + branchName( _branchName ), + buildNumber( _buildNumber ) + {} + + std::ostream& operator << ( std::ostream& os, Version const& version ) { + os << version.majorVersion << '.' + << version.minorVersion << '.' + << version.patchNumber; + // branchName is never null -> 0th char is \0 if it is empty + if (version.branchName[0]) { + os << '-' << version.branchName + << '.' 
<< version.buildNumber; + } + return os; + } + + inline Version libraryVersion() { + static Version version( 1, 9, 4, "", 0 ); + return version; + } + +} + +// #included from: catch_message.hpp +#define TWOBLUECUBES_CATCH_MESSAGE_HPP_INCLUDED + +namespace Catch { + + MessageInfo::MessageInfo( std::string const& _macroName, + SourceLineInfo const& _lineInfo, + ResultWas::OfType _type ) + : macroName( _macroName ), + lineInfo( _lineInfo ), + type( _type ), + sequence( ++globalCount ) + {} + + // This may need protecting if threading support is added + unsigned int MessageInfo::globalCount = 0; + + //////////////////////////////////////////////////////////////////////////// + + ScopedMessage::ScopedMessage( MessageBuilder const& builder ) + : m_info( builder.m_info ) + { + m_info.message = builder.m_stream.str(); + getResultCapture().pushScopedMessage( m_info ); + } + ScopedMessage::ScopedMessage( ScopedMessage const& other ) + : m_info( other.m_info ) + {} + + ScopedMessage::~ScopedMessage() { + if ( !std::uncaught_exception() ){ + getResultCapture().popScopedMessage(m_info); + } + } + +} // end namespace Catch + +// #included from: catch_legacy_reporter_adapter.hpp +#define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_HPP_INCLUDED + +// #included from: catch_legacy_reporter_adapter.h +#define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_H_INCLUDED + +namespace Catch +{ + // Deprecated + struct IReporter : IShared { + virtual ~IReporter(); + + virtual bool shouldRedirectStdout() const = 0; + + virtual void StartTesting() = 0; + virtual void EndTesting( Totals const& totals ) = 0; + virtual void StartGroup( std::string const& groupName ) = 0; + virtual void EndGroup( std::string const& groupName, Totals const& totals ) = 0; + virtual void StartTestCase( TestCaseInfo const& testInfo ) = 0; + virtual void EndTestCase( TestCaseInfo const& testInfo, Totals const& totals, std::string const& stdOut, std::string const& stdErr ) = 0; + virtual void StartSection( std::string 
const& sectionName, std::string const& description ) = 0; + virtual void EndSection( std::string const& sectionName, Counts const& assertions ) = 0; + virtual void NoAssertionsInSection( std::string const& sectionName ) = 0; + virtual void NoAssertionsInTestCase( std::string const& testName ) = 0; + virtual void Aborted() = 0; + virtual void Result( AssertionResult const& result ) = 0; + }; + + class LegacyReporterAdapter : public SharedImpl + { + public: + LegacyReporterAdapter( Ptr const& legacyReporter ); + virtual ~LegacyReporterAdapter(); + + virtual ReporterPreferences getPreferences() const; + virtual void noMatchingTestCases( std::string const& ); + virtual void testRunStarting( TestRunInfo const& ); + virtual void testGroupStarting( GroupInfo const& groupInfo ); + virtual void testCaseStarting( TestCaseInfo const& testInfo ); + virtual void sectionStarting( SectionInfo const& sectionInfo ); + virtual void assertionStarting( AssertionInfo const& ); + virtual bool assertionEnded( AssertionStats const& assertionStats ); + virtual void sectionEnded( SectionStats const& sectionStats ); + virtual void testCaseEnded( TestCaseStats const& testCaseStats ); + virtual void testGroupEnded( TestGroupStats const& testGroupStats ); + virtual void testRunEnded( TestRunStats const& testRunStats ); + virtual void skipTest( TestCaseInfo const& ); + + private: + Ptr m_legacyReporter; + }; +} + +namespace Catch +{ + LegacyReporterAdapter::LegacyReporterAdapter( Ptr const& legacyReporter ) + : m_legacyReporter( legacyReporter ) + {} + LegacyReporterAdapter::~LegacyReporterAdapter() {} + + ReporterPreferences LegacyReporterAdapter::getPreferences() const { + ReporterPreferences prefs; + prefs.shouldRedirectStdOut = m_legacyReporter->shouldRedirectStdout(); + return prefs; + } + + void LegacyReporterAdapter::noMatchingTestCases( std::string const& ) {} + void LegacyReporterAdapter::testRunStarting( TestRunInfo const& ) { + m_legacyReporter->StartTesting(); + } + void 
LegacyReporterAdapter::testGroupStarting( GroupInfo const& groupInfo ) { + m_legacyReporter->StartGroup( groupInfo.name ); + } + void LegacyReporterAdapter::testCaseStarting( TestCaseInfo const& testInfo ) { + m_legacyReporter->StartTestCase( testInfo ); + } + void LegacyReporterAdapter::sectionStarting( SectionInfo const& sectionInfo ) { + m_legacyReporter->StartSection( sectionInfo.name, sectionInfo.description ); + } + void LegacyReporterAdapter::assertionStarting( AssertionInfo const& ) { + // Not on legacy interface + } + + bool LegacyReporterAdapter::assertionEnded( AssertionStats const& assertionStats ) { + if( assertionStats.assertionResult.getResultType() != ResultWas::Ok ) { + for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); + it != itEnd; + ++it ) { + if( it->type == ResultWas::Info ) { + ResultBuilder rb( it->macroName.c_str(), it->lineInfo, "", ResultDisposition::Normal ); + rb << it->message; + rb.setResultType( ResultWas::Info ); + AssertionResult result = rb.build(); + m_legacyReporter->Result( result ); + } + } + } + m_legacyReporter->Result( assertionStats.assertionResult ); + return true; + } + void LegacyReporterAdapter::sectionEnded( SectionStats const& sectionStats ) { + if( sectionStats.missingAssertions ) + m_legacyReporter->NoAssertionsInSection( sectionStats.sectionInfo.name ); + m_legacyReporter->EndSection( sectionStats.sectionInfo.name, sectionStats.assertions ); + } + void LegacyReporterAdapter::testCaseEnded( TestCaseStats const& testCaseStats ) { + m_legacyReporter->EndTestCase + ( testCaseStats.testInfo, + testCaseStats.totals, + testCaseStats.stdOut, + testCaseStats.stdErr ); + } + void LegacyReporterAdapter::testGroupEnded( TestGroupStats const& testGroupStats ) { + if( testGroupStats.aborting ) + m_legacyReporter->Aborted(); + m_legacyReporter->EndGroup( testGroupStats.groupInfo.name, testGroupStats.totals ); + } + void LegacyReporterAdapter::testRunEnded( 
TestRunStats const& testRunStats ) { + m_legacyReporter->EndTesting( testRunStats.totals ); + } + void LegacyReporterAdapter::skipTest( TestCaseInfo const& ) { + } +} + +// #included from: catch_timer.hpp + +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc++11-long-long" +#endif + +#ifdef CATCH_PLATFORM_WINDOWS + +#else + +#include + +#endif + +namespace Catch { + + namespace { +#ifdef CATCH_PLATFORM_WINDOWS + UInt64 getCurrentTicks() { + static UInt64 hz=0, hzo=0; + if (!hz) { + QueryPerformanceFrequency( reinterpret_cast( &hz ) ); + QueryPerformanceCounter( reinterpret_cast( &hzo ) ); + } + UInt64 t; + QueryPerformanceCounter( reinterpret_cast( &t ) ); + return ((t-hzo)*1000000)/hz; + } +#else + UInt64 getCurrentTicks() { + timeval t; + gettimeofday(&t,CATCH_NULL); + return static_cast( t.tv_sec ) * 1000000ull + static_cast( t.tv_usec ); + } +#endif + } + + void Timer::start() { + m_ticks = getCurrentTicks(); + } + unsigned int Timer::getElapsedMicroseconds() const { + return static_cast(getCurrentTicks() - m_ticks); + } + unsigned int Timer::getElapsedMilliseconds() const { + return static_cast(getElapsedMicroseconds()/1000); + } + double Timer::getElapsedSeconds() const { + return getElapsedMicroseconds()/1000000.0; + } + +} // namespace Catch + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif +// #included from: catch_common.hpp +#define TWOBLUECUBES_CATCH_COMMON_HPP_INCLUDED + +#include +#include + +namespace Catch { + + bool startsWith( std::string const& s, std::string const& prefix ) { + return s.size() >= prefix.size() && std::equal(prefix.begin(), prefix.end(), s.begin()); + } + bool startsWith( std::string const& s, char prefix ) { + return !s.empty() && s[0] == prefix; + } + bool endsWith( std::string const& s, std::string const& suffix ) { + return s.size() >= suffix.size() && std::equal(suffix.rbegin(), suffix.rend(), s.rbegin()); + } + bool endsWith( std::string const& s, char suffix ) { + return 
!s.empty() && s[s.size()-1] == suffix; + } + bool contains( std::string const& s, std::string const& infix ) { + return s.find( infix ) != std::string::npos; + } + char toLowerCh(char c) { + return static_cast( std::tolower( c ) ); + } + void toLowerInPlace( std::string& s ) { + std::transform( s.begin(), s.end(), s.begin(), toLowerCh ); + } + std::string toLower( std::string const& s ) { + std::string lc = s; + toLowerInPlace( lc ); + return lc; + } + std::string trim( std::string const& str ) { + static char const* whitespaceChars = "\n\r\t "; + std::string::size_type start = str.find_first_not_of( whitespaceChars ); + std::string::size_type end = str.find_last_not_of( whitespaceChars ); + + return start != std::string::npos ? str.substr( start, 1+end-start ) : std::string(); + } + + bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ) { + bool replaced = false; + std::size_t i = str.find( replaceThis ); + while( i != std::string::npos ) { + replaced = true; + str = str.substr( 0, i ) + withThis + str.substr( i+replaceThis.size() ); + if( i < str.size()-withThis.size() ) + i = str.find( replaceThis, i+withThis.size() ); + else + i = std::string::npos; + } + return replaced; + } + + pluralise::pluralise( std::size_t count, std::string const& label ) + : m_count( count ), + m_label( label ) + {} + + std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ) { + os << pluraliser.m_count << ' ' << pluraliser.m_label; + if( pluraliser.m_count != 1 ) + os << 's'; + return os; + } + + SourceLineInfo::SourceLineInfo() : file(""), line( 0 ){} + SourceLineInfo::SourceLineInfo( char const* _file, std::size_t _line ) + : file( _file ), + line( _line ) + {} + bool SourceLineInfo::empty() const { + return file[0] == '\0'; + } + bool SourceLineInfo::operator == ( SourceLineInfo const& other ) const { + return line == other.line && (file == other.file || std::strcmp(file, other.file) == 0); + } + bool 
SourceLineInfo::operator < ( SourceLineInfo const& other ) const { + return line < other.line || ( line == other.line && (std::strcmp(file, other.file) < 0)); + } + + void seedRng( IConfig const& config ) { + if( config.rngSeed() != 0 ) + std::srand( config.rngSeed() ); + } + unsigned int rngSeed() { + return getCurrentContext().getConfig()->rngSeed(); + } + + std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ) { +#ifndef __GNUG__ + os << info.file << '(' << info.line << ')'; +#else + os << info.file << ':' << info.line; +#endif + return os; + } + + void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ) { + std::ostringstream oss; + oss << locationInfo << ": Internal Catch error: '" << message << '\''; + if( alwaysTrue() ) + throw std::logic_error( oss.str() ); + } +} + +// #included from: catch_section.hpp +#define TWOBLUECUBES_CATCH_SECTION_HPP_INCLUDED + +namespace Catch { + + SectionInfo::SectionInfo + ( SourceLineInfo const& _lineInfo, + std::string const& _name, + std::string const& _description ) + : name( _name ), + description( _description ), + lineInfo( _lineInfo ) + {} + + Section::Section( SectionInfo const& info ) + : m_info( info ), + m_sectionIncluded( getResultCapture().sectionStarted( m_info, m_assertions ) ) + { + m_timer.start(); + } + + Section::~Section() { + if( m_sectionIncluded ) { + SectionEndInfo endInfo( m_info, m_assertions, m_timer.getElapsedSeconds() ); + if( std::uncaught_exception() ) + getResultCapture().sectionEndedEarly( endInfo ); + else + getResultCapture().sectionEnded( endInfo ); + } + } + + // This indicates whether the section should be executed or not + Section::operator bool() const { + return m_sectionIncluded; + } + +} // end namespace Catch + +// #included from: catch_debugger.hpp +#define TWOBLUECUBES_CATCH_DEBUGGER_HPP_INCLUDED + +#ifdef CATCH_PLATFORM_MAC + + #include + #include + #include + #include + #include + + namespace Catch{ + + // The following function 
is taken directly from the following technical note: + // http://developer.apple.com/library/mac/#qa/qa2004/qa1361.html + + // Returns true if the current process is being debugged (either + // running under the debugger or has a debugger attached post facto). + bool isDebuggerActive(){ + + int mib[4]; + struct kinfo_proc info; + size_t size; + + // Initialize the flags so that, if sysctl fails for some bizarre + // reason, we get a predictable result. + + info.kp_proc.p_flag = 0; + + // Initialize mib, which tells sysctl the info we want, in this case + // we're looking for information about a specific process ID. + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PID; + mib[3] = getpid(); + + // Call sysctl. + + size = sizeof(info); + if( sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, CATCH_NULL, 0) != 0 ) { + Catch::cerr() << "\n** Call to sysctl failed - unable to determine if debugger is active **\n" << std::endl; + return false; + } + + // We're being debugged if the P_TRACED flag is set. + + return ( (info.kp_proc.p_flag & P_TRACED) != 0 ); + } + } // namespace Catch + +#elif defined(CATCH_PLATFORM_LINUX) + #include + #include + + namespace Catch{ + // The standard POSIX way of detecting a debugger is to attempt to + // ptrace() the process, but this needs to be done from a child and not + // this process itself to still allow attaching to this process later + // if wanted, so is rather heavy. Under Linux we have the PID of the + // "debugger" (which doesn't need to be gdb, of course, it could also + // be strace, for example) in /proc/$PID/status, so just get it from + // there instead. 
+ bool isDebuggerActive(){ + // Libstdc++ has a bug, where std::ifstream sets errno to 0 + // This way our users can properly assert over errno values + ErrnoGuard guard; + std::ifstream in("/proc/self/status"); + for( std::string line; std::getline(in, line); ) { + static const int PREFIX_LEN = 11; + if( line.compare(0, PREFIX_LEN, "TracerPid:\t") == 0 ) { + // We're traced if the PID is not 0 and no other PID starts + // with 0 digit, so it's enough to check for just a single + // character. + return line.length() > PREFIX_LEN && line[PREFIX_LEN] != '0'; + } + } + + return false; + } + } // namespace Catch +#elif defined(_MSC_VER) + extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); + namespace Catch { + bool isDebuggerActive() { + return IsDebuggerPresent() != 0; + } + } +#elif defined(__MINGW32__) + extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); + namespace Catch { + bool isDebuggerActive() { + return IsDebuggerPresent() != 0; + } + } +#else + namespace Catch { + inline bool isDebuggerActive() { return false; } + } +#endif // Platform + +#ifdef CATCH_PLATFORM_WINDOWS + + namespace Catch { + void writeToDebugConsole( std::string const& text ) { + ::OutputDebugStringA( text.c_str() ); + } + } +#else + namespace Catch { + void writeToDebugConsole( std::string const& text ) { + // !TBD: Need a version for Mac/ XCode and other IDEs + Catch::cout() << text; + } + } +#endif // Platform + +// #included from: catch_tostring.hpp +#define TWOBLUECUBES_CATCH_TOSTRING_HPP_INCLUDED + +namespace Catch { + +namespace Detail { + + const std::string unprintableString = "{?}"; + + namespace { + const int hexThreshold = 255; + + struct Endianness { + enum Arch { Big, Little }; + + static Arch which() { + union _{ + int asInt; + char asChar[sizeof (int)]; + } u; + + u.asInt = 1; + return ( u.asChar[sizeof(int)-1] == 1 ) ? 
Big : Little; + } + }; + } + + std::string rawMemoryToString( const void *object, std::size_t size ) + { + // Reverse order for little endian architectures + int i = 0, end = static_cast( size ), inc = 1; + if( Endianness::which() == Endianness::Little ) { + i = end-1; + end = inc = -1; + } + + unsigned char const *bytes = static_cast(object); + std::ostringstream os; + os << "0x" << std::setfill('0') << std::hex; + for( ; i != end; i += inc ) + os << std::setw(2) << static_cast(bytes[i]); + return os.str(); + } +} + +std::string toString( std::string const& value ) { + std::string s = value; + if( getCurrentContext().getConfig()->showInvisibles() ) { + for(size_t i = 0; i < s.size(); ++i ) { + std::string subs; + switch( s[i] ) { + case '\n': subs = "\\n"; break; + case '\t': subs = "\\t"; break; + default: break; + } + if( !subs.empty() ) { + s = s.substr( 0, i ) + subs + s.substr( i+1 ); + ++i; + } + } + } + return '"' + s + '"'; +} +std::string toString( std::wstring const& value ) { + + std::string s; + s.reserve( value.size() ); + for(size_t i = 0; i < value.size(); ++i ) + s += value[i] <= 0xff ? static_cast( value[i] ) : '?'; + return Catch::toString( s ); +} + +std::string toString( const char* const value ) { + return value ? Catch::toString( std::string( value ) ) : std::string( "{null string}" ); +} + +std::string toString( char* const value ) { + return Catch::toString( static_cast( value ) ); +} + +std::string toString( const wchar_t* const value ) +{ + return value ? 
Catch::toString( std::wstring(value) ) : std::string( "{null string}" ); +} + +std::string toString( wchar_t* const value ) +{ + return Catch::toString( static_cast( value ) ); +} + +std::string toString( int value ) { + std::ostringstream oss; + oss << value; + if( value > Detail::hexThreshold ) + oss << " (0x" << std::hex << value << ')'; + return oss.str(); +} + +std::string toString( unsigned long value ) { + std::ostringstream oss; + oss << value; + if( value > Detail::hexThreshold ) + oss << " (0x" << std::hex << value << ')'; + return oss.str(); +} + +std::string toString( unsigned int value ) { + return Catch::toString( static_cast( value ) ); +} + +template +std::string fpToString( T value, int precision ) { + std::ostringstream oss; + oss << std::setprecision( precision ) + << std::fixed + << value; + std::string d = oss.str(); + std::size_t i = d.find_last_not_of( '0' ); + if( i != std::string::npos && i != d.size()-1 ) { + if( d[i] == '.' ) + i++; + d = d.substr( 0, i+1 ); + } + return d; +} + +std::string toString( const double value ) { + return fpToString( value, 10 ); +} +std::string toString( const float value ) { + return fpToString( value, 5 ) + 'f'; +} + +std::string toString( bool value ) { + return value ? 
"true" : "false"; +} + +std::string toString( char value ) { + if ( value == '\r' ) + return "'\\r'"; + if ( value == '\f' ) + return "'\\f'"; + if ( value == '\n' ) + return "'\\n'"; + if ( value == '\t' ) + return "'\\t'"; + if ( '\0' <= value && value < ' ' ) + return toString( static_cast( value ) ); + char chstr[] = "' '"; + chstr[1] = value; + return chstr; +} + +std::string toString( signed char value ) { + return toString( static_cast( value ) ); +} + +std::string toString( unsigned char value ) { + return toString( static_cast( value ) ); +} + +#ifdef CATCH_CONFIG_CPP11_LONG_LONG +std::string toString( long long value ) { + std::ostringstream oss; + oss << value; + if( value > Detail::hexThreshold ) + oss << " (0x" << std::hex << value << ')'; + return oss.str(); +} +std::string toString( unsigned long long value ) { + std::ostringstream oss; + oss << value; + if( value > Detail::hexThreshold ) + oss << " (0x" << std::hex << value << ')'; + return oss.str(); +} +#endif + +#ifdef CATCH_CONFIG_CPP11_NULLPTR +std::string toString( std::nullptr_t ) { + return "nullptr"; +} +#endif + +#ifdef __OBJC__ + std::string toString( NSString const * const& nsstring ) { + if( !nsstring ) + return "nil"; + return "@" + toString([nsstring UTF8String]); + } + std::string toString( NSString * CATCH_ARC_STRONG const& nsstring ) { + if( !nsstring ) + return "nil"; + return "@" + toString([nsstring UTF8String]); + } + std::string toString( NSObject* const& nsObject ) { + return toString( [nsObject description] ); + } +#endif + +} // end namespace Catch + +// #included from: catch_result_builder.hpp +#define TWOBLUECUBES_CATCH_RESULT_BUILDER_HPP_INCLUDED + +namespace Catch { + + std::string capturedExpressionWithSecondArgument( std::string const& capturedExpression, std::string const& secondArg ) { + return secondArg.empty() || secondArg == "\"\"" + ? 
capturedExpression + : capturedExpression + ", " + secondArg; + } + ResultBuilder::ResultBuilder( char const* macroName, + SourceLineInfo const& lineInfo, + char const* capturedExpression, + ResultDisposition::Flags resultDisposition, + char const* secondArg ) + : m_assertionInfo( macroName, lineInfo, capturedExpressionWithSecondArgument( capturedExpression, secondArg ), resultDisposition ), + m_shouldDebugBreak( false ), + m_shouldThrow( false ), + m_guardException( false ) + {} + + ResultBuilder::~ResultBuilder() { +#if defined(CATCH_CONFIG_FAST_COMPILE) + if ( m_guardException ) { + m_stream.oss << "Exception translation was disabled by CATCH_CONFIG_FAST_COMPILE"; + captureResult( ResultWas::ThrewException ); + getCurrentContext().getResultCapture()->exceptionEarlyReported(); + } +#endif + } + + ResultBuilder& ResultBuilder::setResultType( ResultWas::OfType result ) { + m_data.resultType = result; + return *this; + } + ResultBuilder& ResultBuilder::setResultType( bool result ) { + m_data.resultType = result ? 
ResultWas::Ok : ResultWas::ExpressionFailed; + return *this; + } + + void ResultBuilder::endExpression( DecomposedExpression const& expr ) { + AssertionResult result = build( expr ); + handleResult( result ); + } + + void ResultBuilder::useActiveException( ResultDisposition::Flags resultDisposition ) { + m_assertionInfo.resultDisposition = resultDisposition; + m_stream.oss << Catch::translateActiveException(); + captureResult( ResultWas::ThrewException ); + } + + void ResultBuilder::captureResult( ResultWas::OfType resultType ) { + setResultType( resultType ); + captureExpression(); + } + + void ResultBuilder::captureExpectedException( std::string const& expectedMessage ) { + if( expectedMessage.empty() ) + captureExpectedException( Matchers::Impl::MatchAllOf() ); + else + captureExpectedException( Matchers::Equals( expectedMessage ) ); + } + + void ResultBuilder::captureExpectedException( Matchers::Impl::MatcherBase const& matcher ) { + + assert( !isFalseTest( m_assertionInfo.resultDisposition ) ); + AssertionResultData data = m_data; + data.resultType = ResultWas::Ok; + data.reconstructedExpression = m_assertionInfo.capturedExpression; + + std::string actualMessage = Catch::translateActiveException(); + if( !matcher.match( actualMessage ) ) { + data.resultType = ResultWas::ExpressionFailed; + data.reconstructedExpression = actualMessage; + } + AssertionResult result( m_assertionInfo, data ); + handleResult( result ); + } + + void ResultBuilder::captureExpression() { + AssertionResult result = build(); + handleResult( result ); + } + + void ResultBuilder::handleResult( AssertionResult const& result ) + { + getResultCapture().assertionEnded( result ); + + if( !result.isOk() ) { + if( getCurrentContext().getConfig()->shouldDebugBreak() ) + m_shouldDebugBreak = true; + if( getCurrentContext().getRunner()->aborting() || (m_assertionInfo.resultDisposition & ResultDisposition::Normal) ) + m_shouldThrow = true; + } + } + + void ResultBuilder::react() { +#if 
defined(CATCH_CONFIG_FAST_COMPILE) + if (m_shouldDebugBreak) { + /////////////////////////////////////////////////////////////////// + // To inspect the state during test, you need to go one level up the callstack + // To go back to the test and change execution, jump over the throw statement + /////////////////////////////////////////////////////////////////// + CATCH_BREAK_INTO_DEBUGGER(); + } +#endif + if( m_shouldThrow ) + throw Catch::TestFailureException(); + } + + bool ResultBuilder::shouldDebugBreak() const { return m_shouldDebugBreak; } + bool ResultBuilder::allowThrows() const { return getCurrentContext().getConfig()->allowThrows(); } + + AssertionResult ResultBuilder::build() const + { + return build( *this ); + } + + // CAVEAT: The returned AssertionResult stores a pointer to the argument expr, + // a temporary DecomposedExpression, which in turn holds references to + // operands, possibly temporary as well. + // It should immediately be passed to handleResult; if the expression + // needs to be reported, its string expansion must be composed before + // the temporaries are destroyed. 
+ AssertionResult ResultBuilder::build( DecomposedExpression const& expr ) const + { + assert( m_data.resultType != ResultWas::Unknown ); + AssertionResultData data = m_data; + + // Flip bool results if FalseTest flag is set + if( isFalseTest( m_assertionInfo.resultDisposition ) ) { + data.negate( expr.isBinaryExpression() ); + } + + data.message = m_stream.oss.str(); + data.decomposedExpression = &expr; // for lazy reconstruction + return AssertionResult( m_assertionInfo, data ); + } + + void ResultBuilder::reconstructExpression( std::string& dest ) const { + dest = m_assertionInfo.capturedExpression; + } + + void ResultBuilder::setExceptionGuard() { + m_guardException = true; + } + void ResultBuilder::unsetExceptionGuard() { + m_guardException = false; + } + +} // end namespace Catch + +// #included from: catch_tag_alias_registry.hpp +#define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_HPP_INCLUDED + +namespace Catch { + + TagAliasRegistry::~TagAliasRegistry() {} + + Option TagAliasRegistry::find( std::string const& alias ) const { + std::map::const_iterator it = m_registry.find( alias ); + if( it != m_registry.end() ) + return it->second; + else + return Option(); + } + + std::string TagAliasRegistry::expandAliases( std::string const& unexpandedTestSpec ) const { + std::string expandedTestSpec = unexpandedTestSpec; + for( std::map::const_iterator it = m_registry.begin(), itEnd = m_registry.end(); + it != itEnd; + ++it ) { + std::size_t pos = expandedTestSpec.find( it->first ); + if( pos != std::string::npos ) { + expandedTestSpec = expandedTestSpec.substr( 0, pos ) + + it->second.tag + + expandedTestSpec.substr( pos + it->first.size() ); + } + } + return expandedTestSpec; + } + + void TagAliasRegistry::add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) { + + if( !startsWith( alias, "[@" ) || !endsWith( alias, ']' ) ) { + std::ostringstream oss; + oss << Colour( Colour::Red ) + << "error: tag alias, \"" << alias << "\" is not of the 
form [@alias name].\n" + << Colour( Colour::FileName ) + << lineInfo << '\n'; + throw std::domain_error( oss.str().c_str() ); + } + if( !m_registry.insert( std::make_pair( alias, TagAlias( tag, lineInfo ) ) ).second ) { + std::ostringstream oss; + oss << Colour( Colour::Red ) + << "error: tag alias, \"" << alias << "\" already registered.\n" + << "\tFirst seen at " + << Colour( Colour::Red ) << find(alias)->lineInfo << '\n' + << Colour( Colour::Red ) << "\tRedefined at " + << Colour( Colour::FileName) << lineInfo << '\n'; + throw std::domain_error( oss.str().c_str() ); + } + } + + ITagAliasRegistry::~ITagAliasRegistry() {} + + ITagAliasRegistry const& ITagAliasRegistry::get() { + return getRegistryHub().getTagAliasRegistry(); + } + + RegistrarForTagAliases::RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ) { + getMutableRegistryHub().registerTagAlias( alias, tag, lineInfo ); + } + +} // end namespace Catch + +// #included from: catch_matchers_string.hpp + +namespace Catch { +namespace Matchers { + + namespace StdString { + + CasedString::CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ) + : m_caseSensitivity( caseSensitivity ), + m_str( adjustString( str ) ) + {} + std::string CasedString::adjustString( std::string const& str ) const { + return m_caseSensitivity == CaseSensitive::No + ? toLower( str ) + : str; + } + std::string CasedString::caseSensitivitySuffix() const { + return m_caseSensitivity == CaseSensitive::No + ? 
" (case insensitive)" + : std::string(); + } + + StringMatcherBase::StringMatcherBase( std::string const& operation, CasedString const& comparator ) + : m_comparator( comparator ), + m_operation( operation ) { + } + + std::string StringMatcherBase::describe() const { + std::string description; + description.reserve(5 + m_operation.size() + m_comparator.m_str.size() + + m_comparator.caseSensitivitySuffix().size()); + description += m_operation; + description += ": \""; + description += m_comparator.m_str; + description += "\""; + description += m_comparator.caseSensitivitySuffix(); + return description; + } + + EqualsMatcher::EqualsMatcher( CasedString const& comparator ) : StringMatcherBase( "equals", comparator ) {} + + bool EqualsMatcher::match( std::string const& source ) const { + return m_comparator.adjustString( source ) == m_comparator.m_str; + } + + ContainsMatcher::ContainsMatcher( CasedString const& comparator ) : StringMatcherBase( "contains", comparator ) {} + + bool ContainsMatcher::match( std::string const& source ) const { + return contains( m_comparator.adjustString( source ), m_comparator.m_str ); + } + + StartsWithMatcher::StartsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "starts with", comparator ) {} + + bool StartsWithMatcher::match( std::string const& source ) const { + return startsWith( m_comparator.adjustString( source ), m_comparator.m_str ); + } + + EndsWithMatcher::EndsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "ends with", comparator ) {} + + bool EndsWithMatcher::match( std::string const& source ) const { + return endsWith( m_comparator.adjustString( source ), m_comparator.m_str ); + } + + } // namespace StdString + + StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity ) { + return StdString::EqualsMatcher( StdString::CasedString( str, caseSensitivity) ); + } + StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice 
caseSensitivity ) { + return StdString::ContainsMatcher( StdString::CasedString( str, caseSensitivity) ); + } + StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { + return StdString::EndsWithMatcher( StdString::CasedString( str, caseSensitivity) ); + } + StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { + return StdString::StartsWithMatcher( StdString::CasedString( str, caseSensitivity) ); + } + +} // namespace Matchers +} // namespace Catch +// #included from: ../reporters/catch_reporter_multi.hpp +#define TWOBLUECUBES_CATCH_REPORTER_MULTI_HPP_INCLUDED + +namespace Catch { + +class MultipleReporters : public SharedImpl { + typedef std::vector > Reporters; + Reporters m_reporters; + +public: + void add( Ptr const& reporter ) { + m_reporters.push_back( reporter ); + } + +public: // IStreamingReporter + + virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { + return m_reporters[0]->getPreferences(); + } + + virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->noMatchingTestCases( spec ); + } + + virtual void testRunStarting( TestRunInfo const& testRunInfo ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->testRunStarting( testRunInfo ); + } + + virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->testGroupStarting( groupInfo ); + } + + virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->testCaseStarting( testInfo ); + } + + virtual 
void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->sectionStarting( sectionInfo ); + } + + virtual void assertionStarting( AssertionInfo const& assertionInfo ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->assertionStarting( assertionInfo ); + } + + // The return value indicates if the messages buffer should be cleared: + virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { + bool clearBuffer = false; + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + clearBuffer |= (*it)->assertionEnded( assertionStats ); + return clearBuffer; + } + + virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->sectionEnded( sectionStats ); + } + + virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->testCaseEnded( testCaseStats ); + } + + virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->testGroupEnded( testGroupStats ); + } + + virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + ++it ) + (*it)->testRunEnded( testRunStats ); + } + + virtual void skipTest( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { + for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); + it != itEnd; + 
++it ) + (*it)->skipTest( testInfo ); + } + + virtual MultipleReporters* tryAsMulti() CATCH_OVERRIDE { + return this; + } + +}; + +Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ) { + Ptr resultingReporter; + + if( existingReporter ) { + MultipleReporters* multi = existingReporter->tryAsMulti(); + if( !multi ) { + multi = new MultipleReporters; + resultingReporter = Ptr( multi ); + if( existingReporter ) + multi->add( existingReporter ); + } + else + resultingReporter = existingReporter; + multi->add( additionalReporter ); + } + else + resultingReporter = additionalReporter; + + return resultingReporter; +} + +} // end namespace Catch + +// #included from: ../reporters/catch_reporter_xml.hpp +#define TWOBLUECUBES_CATCH_REPORTER_XML_HPP_INCLUDED + +// #included from: catch_reporter_bases.hpp +#define TWOBLUECUBES_CATCH_REPORTER_BASES_HPP_INCLUDED + +#include +#include +#include +#include + +namespace Catch { + + namespace { + // Because formatting using c++ streams is stateful, drop down to C is required + // Alternatively we could use stringstream, but its performance is... not good. 
+ std::string getFormattedDuration( double duration ) { + // Max exponent + 1 is required to represent the whole part + // + 1 for decimal point + // + 3 for the 3 decimal places + // + 1 for null terminator + const size_t maxDoubleSize = DBL_MAX_10_EXP + 1 + 1 + 3 + 1; + char buffer[maxDoubleSize]; + + // Save previous errno, to prevent sprintf from overwriting it + ErrnoGuard guard; +#ifdef _MSC_VER + sprintf_s(buffer, "%.3f", duration); +#else + sprintf(buffer, "%.3f", duration); +#endif + return std::string(buffer); + } + } + + struct StreamingReporterBase : SharedImpl { + + StreamingReporterBase( ReporterConfig const& _config ) + : m_config( _config.fullConfig() ), + stream( _config.stream() ) + { + m_reporterPrefs.shouldRedirectStdOut = false; + } + + virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { + return m_reporterPrefs; + } + + virtual ~StreamingReporterBase() CATCH_OVERRIDE; + + virtual void noMatchingTestCases( std::string const& ) CATCH_OVERRIDE {} + + virtual void testRunStarting( TestRunInfo const& _testRunInfo ) CATCH_OVERRIDE { + currentTestRunInfo = _testRunInfo; + } + virtual void testGroupStarting( GroupInfo const& _groupInfo ) CATCH_OVERRIDE { + currentGroupInfo = _groupInfo; + } + + virtual void testCaseStarting( TestCaseInfo const& _testInfo ) CATCH_OVERRIDE { + currentTestCaseInfo = _testInfo; + } + virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { + m_sectionStack.push_back( _sectionInfo ); + } + + virtual void sectionEnded( SectionStats const& /* _sectionStats */ ) CATCH_OVERRIDE { + m_sectionStack.pop_back(); + } + virtual void testCaseEnded( TestCaseStats const& /* _testCaseStats */ ) CATCH_OVERRIDE { + currentTestCaseInfo.reset(); + } + virtual void testGroupEnded( TestGroupStats const& /* _testGroupStats */ ) CATCH_OVERRIDE { + currentGroupInfo.reset(); + } + virtual void testRunEnded( TestRunStats const& /* _testRunStats */ ) CATCH_OVERRIDE { + currentTestCaseInfo.reset(); + 
currentGroupInfo.reset(); + currentTestRunInfo.reset(); + } + + virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE { + // Don't do anything with this by default. + // It can optionally be overridden in the derived class. + } + + Ptr m_config; + std::ostream& stream; + + LazyStat currentTestRunInfo; + LazyStat currentGroupInfo; + LazyStat currentTestCaseInfo; + + std::vector m_sectionStack; + ReporterPreferences m_reporterPrefs; + }; + + struct CumulativeReporterBase : SharedImpl { + template + struct Node : SharedImpl<> { + explicit Node( T const& _value ) : value( _value ) {} + virtual ~Node() {} + + typedef std::vector > ChildNodes; + T value; + ChildNodes children; + }; + struct SectionNode : SharedImpl<> { + explicit SectionNode( SectionStats const& _stats ) : stats( _stats ) {} + virtual ~SectionNode(); + + bool operator == ( SectionNode const& other ) const { + return stats.sectionInfo.lineInfo == other.stats.sectionInfo.lineInfo; + } + bool operator == ( Ptr const& other ) const { + return operator==( *other ); + } + + SectionStats stats; + typedef std::vector > ChildSections; + typedef std::vector Assertions; + ChildSections childSections; + Assertions assertions; + std::string stdOut; + std::string stdErr; + }; + + struct BySectionInfo { + BySectionInfo( SectionInfo const& other ) : m_other( other ) {} + BySectionInfo( BySectionInfo const& other ) : m_other( other.m_other ) {} + bool operator() ( Ptr const& node ) const { + return node->stats.sectionInfo.lineInfo == m_other.lineInfo; + } + private: + void operator=( BySectionInfo const& ); + SectionInfo const& m_other; + }; + + typedef Node TestCaseNode; + typedef Node TestGroupNode; + typedef Node TestRunNode; + + CumulativeReporterBase( ReporterConfig const& _config ) + : m_config( _config.fullConfig() ), + stream( _config.stream() ) + { + m_reporterPrefs.shouldRedirectStdOut = false; + } + ~CumulativeReporterBase(); + + virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { + 
return m_reporterPrefs; + } + + virtual void testRunStarting( TestRunInfo const& ) CATCH_OVERRIDE {} + virtual void testGroupStarting( GroupInfo const& ) CATCH_OVERRIDE {} + + virtual void testCaseStarting( TestCaseInfo const& ) CATCH_OVERRIDE {} + + virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { + SectionStats incompleteStats( sectionInfo, Counts(), 0, false ); + Ptr node; + if( m_sectionStack.empty() ) { + if( !m_rootSection ) + m_rootSection = new SectionNode( incompleteStats ); + node = m_rootSection; + } + else { + SectionNode& parentNode = *m_sectionStack.back(); + SectionNode::ChildSections::const_iterator it = + std::find_if( parentNode.childSections.begin(), + parentNode.childSections.end(), + BySectionInfo( sectionInfo ) ); + if( it == parentNode.childSections.end() ) { + node = new SectionNode( incompleteStats ); + parentNode.childSections.push_back( node ); + } + else + node = *it; + } + m_sectionStack.push_back( node ); + m_deepestSection = node; + } + + virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {} + + virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { + assert( !m_sectionStack.empty() ); + SectionNode& sectionNode = *m_sectionStack.back(); + sectionNode.assertions.push_back( assertionStats ); + // AssertionResult holds a pointer to a temporary DecomposedExpression, + // which getExpandedExpression() calls to build the expression string. + // Our section stack copy of the assertionResult will likely outlive the + // temporary, so it must be expanded or discarded now to avoid calling + // a destroyed object later. 
+ prepareExpandedExpression( sectionNode.assertions.back().assertionResult ); + return true; + } + virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { + assert( !m_sectionStack.empty() ); + SectionNode& node = *m_sectionStack.back(); + node.stats = sectionStats; + m_sectionStack.pop_back(); + } + virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { + Ptr node = new TestCaseNode( testCaseStats ); + assert( m_sectionStack.size() == 0 ); + node->children.push_back( m_rootSection ); + m_testCases.push_back( node ); + m_rootSection.reset(); + + assert( m_deepestSection ); + m_deepestSection->stdOut = testCaseStats.stdOut; + m_deepestSection->stdErr = testCaseStats.stdErr; + } + virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { + Ptr node = new TestGroupNode( testGroupStats ); + node->children.swap( m_testCases ); + m_testGroups.push_back( node ); + } + virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { + Ptr node = new TestRunNode( testRunStats ); + node->children.swap( m_testGroups ); + m_testRuns.push_back( node ); + testRunEndedCumulative(); + } + virtual void testRunEndedCumulative() = 0; + + virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {} + + virtual void prepareExpandedExpression( AssertionResult& result ) const { + if( result.isOk() ) + result.discardDecomposedExpression(); + else + result.expandDecomposedExpression(); + } + + Ptr m_config; + std::ostream& stream; + std::vector m_assertions; + std::vector > > m_sections; + std::vector > m_testCases; + std::vector > m_testGroups; + + std::vector > m_testRuns; + + Ptr m_rootSection; + Ptr m_deepestSection; + std::vector > m_sectionStack; + ReporterPreferences m_reporterPrefs; + + }; + + template + char const* getLineOfChars() { + static char line[CATCH_CONFIG_CONSOLE_WIDTH] = {0}; + if( !*line ) { + std::memset( line, C, CATCH_CONFIG_CONSOLE_WIDTH-1 ); + 
line[CATCH_CONFIG_CONSOLE_WIDTH-1] = 0; + } + return line; + } + + struct TestEventListenerBase : StreamingReporterBase { + TestEventListenerBase( ReporterConfig const& _config ) + : StreamingReporterBase( _config ) + {} + + virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {} + virtual bool assertionEnded( AssertionStats const& ) CATCH_OVERRIDE { + return false; + } + }; + +} // end namespace Catch + +// #included from: ../internal/catch_reporter_registrars.hpp +#define TWOBLUECUBES_CATCH_REPORTER_REGISTRARS_HPP_INCLUDED + +namespace Catch { + + template + class LegacyReporterRegistrar { + + class ReporterFactory : public IReporterFactory { + virtual IStreamingReporter* create( ReporterConfig const& config ) const { + return new LegacyReporterAdapter( new T( config ) ); + } + + virtual std::string getDescription() const { + return T::getDescription(); + } + }; + + public: + + LegacyReporterRegistrar( std::string const& name ) { + getMutableRegistryHub().registerReporter( name, new ReporterFactory() ); + } + }; + + template + class ReporterRegistrar { + + class ReporterFactory : public SharedImpl { + + // *** Please Note ***: + // - If you end up here looking at a compiler error because it's trying to register + // your custom reporter class be aware that the native reporter interface has changed + // to IStreamingReporter. The "legacy" interface, IReporter, is still supported via + // an adapter. Just use REGISTER_LEGACY_REPORTER to take advantage of the adapter. + // However please consider updating to the new interface as the old one is now + // deprecated and will probably be removed quite soon! + // Please contact me via github if you have any questions at all about this. + // In fact, ideally, please contact me anyway to let me know you've hit this - as I have + // no idea who is actually using custom reporters at all (possibly no-one!). + // The new interface is designed to minimise exposure to interface changes in the future. 
+ virtual IStreamingReporter* create( ReporterConfig const& config ) const { + return new T( config ); + } + + virtual std::string getDescription() const { + return T::getDescription(); + } + }; + + public: + + ReporterRegistrar( std::string const& name ) { + getMutableRegistryHub().registerReporter( name, new ReporterFactory() ); + } + }; + + template + class ListenerRegistrar { + + class ListenerFactory : public SharedImpl { + + virtual IStreamingReporter* create( ReporterConfig const& config ) const { + return new T( config ); + } + virtual std::string getDescription() const { + return std::string(); + } + }; + + public: + + ListenerRegistrar() { + getMutableRegistryHub().registerListener( new ListenerFactory() ); + } + }; +} + +#define INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) \ + namespace{ Catch::LegacyReporterRegistrar catch_internal_RegistrarFor##reporterType( name ); } + +#define INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) \ + namespace{ Catch::ReporterRegistrar catch_internal_RegistrarFor##reporterType( name ); } + +// Deprecated - use the form without INTERNAL_ +#define INTERNAL_CATCH_REGISTER_LISTENER( listenerType ) \ + namespace{ Catch::ListenerRegistrar catch_internal_RegistrarFor##listenerType; } + +#define CATCH_REGISTER_LISTENER( listenerType ) \ + namespace{ Catch::ListenerRegistrar catch_internal_RegistrarFor##listenerType; } + +// #included from: ../internal/catch_xmlwriter.hpp +#define TWOBLUECUBES_CATCH_XMLWRITER_HPP_INCLUDED + +#include +#include +#include +#include + +namespace Catch { + + class XmlEncode { + public: + enum ForWhat { ForTextNodes, ForAttributes }; + + XmlEncode( std::string const& str, ForWhat forWhat = ForTextNodes ) + : m_str( str ), + m_forWhat( forWhat ) + {} + + void encodeTo( std::ostream& os ) const { + + // Apostrophe escaping not necessary if we always use " to write attributes + // (see: http://www.w3.org/TR/xml/#syntax) + + for( std::size_t i = 0; i < m_str.size(); ++ i ) { + char c 
= m_str[i]; + switch( c ) { + case '<': os << "<"; break; + case '&': os << "&"; break; + + case '>': + // See: http://www.w3.org/TR/xml/#syntax + if( i > 2 && m_str[i-1] == ']' && m_str[i-2] == ']' ) + os << ">"; + else + os << c; + break; + + case '\"': + if( m_forWhat == ForAttributes ) + os << """; + else + os << c; + break; + + default: + // Escape control chars - based on contribution by @espenalb in PR #465 and + // by @mrpi PR #588 + if ( ( c >= 0 && c < '\x09' ) || ( c > '\x0D' && c < '\x20') || c=='\x7F' ) { + // see http://stackoverflow.com/questions/404107/why-are-control-characters-illegal-in-xml-1-0 + os << "\\x" << std::uppercase << std::hex << std::setfill('0') << std::setw(2) + << static_cast( c ); + } + else + os << c; + } + } + } + + friend std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode ) { + xmlEncode.encodeTo( os ); + return os; + } + + private: + std::string m_str; + ForWhat m_forWhat; + }; + + class XmlWriter { + public: + + class ScopedElement { + public: + ScopedElement( XmlWriter* writer ) + : m_writer( writer ) + {} + + ScopedElement( ScopedElement const& other ) + : m_writer( other.m_writer ){ + other.m_writer = CATCH_NULL; + } + + ~ScopedElement() { + if( m_writer ) + m_writer->endElement(); + } + + ScopedElement& writeText( std::string const& text, bool indent = true ) { + m_writer->writeText( text, indent ); + return *this; + } + + template + ScopedElement& writeAttribute( std::string const& name, T const& attribute ) { + m_writer->writeAttribute( name, attribute ); + return *this; + } + + private: + mutable XmlWriter* m_writer; + }; + + XmlWriter() + : m_tagIsOpen( false ), + m_needsNewline( false ), + m_os( Catch::cout() ) + { + writeDeclaration(); + } + + XmlWriter( std::ostream& os ) + : m_tagIsOpen( false ), + m_needsNewline( false ), + m_os( os ) + { + writeDeclaration(); + } + + ~XmlWriter() { + while( !m_tags.empty() ) + endElement(); + } + + XmlWriter& startElement( std::string const& name ) { + 
ensureTagClosed(); + newlineIfNecessary(); + m_os << m_indent << '<' << name; + m_tags.push_back( name ); + m_indent += " "; + m_tagIsOpen = true; + return *this; + } + + ScopedElement scopedElement( std::string const& name ) { + ScopedElement scoped( this ); + startElement( name ); + return scoped; + } + + XmlWriter& endElement() { + newlineIfNecessary(); + m_indent = m_indent.substr( 0, m_indent.size()-2 ); + if( m_tagIsOpen ) { + m_os << "/>"; + m_tagIsOpen = false; + } + else { + m_os << m_indent << ""; + } + m_os << std::endl; + m_tags.pop_back(); + return *this; + } + + XmlWriter& writeAttribute( std::string const& name, std::string const& attribute ) { + if( !name.empty() && !attribute.empty() ) + m_os << ' ' << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << '"'; + return *this; + } + + XmlWriter& writeAttribute( std::string const& name, bool attribute ) { + m_os << ' ' << name << "=\"" << ( attribute ? "true" : "false" ) << '"'; + return *this; + } + + template + XmlWriter& writeAttribute( std::string const& name, T const& attribute ) { + std::ostringstream oss; + oss << attribute; + return writeAttribute( name, oss.str() ); + } + + XmlWriter& writeText( std::string const& text, bool indent = true ) { + if( !text.empty() ){ + bool tagWasOpen = m_tagIsOpen; + ensureTagClosed(); + if( tagWasOpen && indent ) + m_os << m_indent; + m_os << XmlEncode( text ); + m_needsNewline = true; + } + return *this; + } + + XmlWriter& writeComment( std::string const& text ) { + ensureTagClosed(); + m_os << m_indent << ""; + m_needsNewline = true; + return *this; + } + + void writeStylesheetRef( std::string const& url ) { + m_os << "\n"; + } + + XmlWriter& writeBlankLine() { + ensureTagClosed(); + m_os << '\n'; + return *this; + } + + void ensureTagClosed() { + if( m_tagIsOpen ) { + m_os << ">" << std::endl; + m_tagIsOpen = false; + } + } + + private: + XmlWriter( XmlWriter const& ); + void operator=( XmlWriter const& ); + + void writeDeclaration() { + 
m_os << "\n"; + } + + void newlineIfNecessary() { + if( m_needsNewline ) { + m_os << std::endl; + m_needsNewline = false; + } + } + + bool m_tagIsOpen; + bool m_needsNewline; + std::vector m_tags; + std::string m_indent; + std::ostream& m_os; + }; + +} +// #included from: catch_reenable_warnings.h + +#define TWOBLUECUBES_CATCH_REENABLE_WARNINGS_H_INCLUDED + +#ifdef __clang__ +# ifdef __ICC // icpc defines the __clang__ macro +# pragma warning(pop) +# else +# pragma clang diagnostic pop +# endif +#elif defined __GNUC__ +# pragma GCC diagnostic pop +#endif + + +namespace Catch { + class XmlReporter : public StreamingReporterBase { + public: + XmlReporter( ReporterConfig const& _config ) + : StreamingReporterBase( _config ), + m_xml(_config.stream()), + m_sectionDepth( 0 ) + { + m_reporterPrefs.shouldRedirectStdOut = true; + } + + virtual ~XmlReporter() CATCH_OVERRIDE; + + static std::string getDescription() { + return "Reports test results as an XML document"; + } + + virtual std::string getStylesheetRef() const { + return std::string(); + } + + void writeSourceInfo( SourceLineInfo const& sourceInfo ) { + m_xml + .writeAttribute( "filename", sourceInfo.file ) + .writeAttribute( "line", sourceInfo.line ); + } + + public: // StreamingReporterBase + + virtual void noMatchingTestCases( std::string const& s ) CATCH_OVERRIDE { + StreamingReporterBase::noMatchingTestCases( s ); + } + + virtual void testRunStarting( TestRunInfo const& testInfo ) CATCH_OVERRIDE { + StreamingReporterBase::testRunStarting( testInfo ); + std::string stylesheetRef = getStylesheetRef(); + if( !stylesheetRef.empty() ) + m_xml.writeStylesheetRef( stylesheetRef ); + m_xml.startElement( "Catch" ); + if( !m_config->name().empty() ) + m_xml.writeAttribute( "name", m_config->name() ); + } + + virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { + StreamingReporterBase::testGroupStarting( groupInfo ); + m_xml.startElement( "Group" ) + .writeAttribute( "name", groupInfo.name ); + } 
+ + virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { + StreamingReporterBase::testCaseStarting(testInfo); + m_xml.startElement( "TestCase" ) + .writeAttribute( "name", trim( testInfo.name ) ) + .writeAttribute( "description", testInfo.description ) + .writeAttribute( "tags", testInfo.tagsAsString ); + + writeSourceInfo( testInfo.lineInfo ); + + if ( m_config->showDurations() == ShowDurations::Always ) + m_testCaseTimer.start(); + m_xml.ensureTagClosed(); + } + + virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { + StreamingReporterBase::sectionStarting( sectionInfo ); + if( m_sectionDepth++ > 0 ) { + m_xml.startElement( "Section" ) + .writeAttribute( "name", trim( sectionInfo.name ) ) + .writeAttribute( "description", sectionInfo.description ); + writeSourceInfo( sectionInfo.lineInfo ); + m_xml.ensureTagClosed(); + } + } + + virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } + + virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { + + AssertionResult const& result = assertionStats.assertionResult; + + bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); + + if( includeResults ) { + // Print any info messages in tags. + for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); + it != itEnd; + ++it ) { + if( it->type == ResultWas::Info ) { + m_xml.scopedElement( "Info" ) + .writeText( it->message ); + } else if ( it->type == ResultWas::Warning ) { + m_xml.scopedElement( "Warning" ) + .writeText( it->message ); + } + } + } + + // Drop out if result was successful but we're not printing them. + if( !includeResults && result.getResultType() != ResultWas::Warning ) + return true; + + // Print the expression if there is one. 
+ if( result.hasExpression() ) { + m_xml.startElement( "Expression" ) + .writeAttribute( "success", result.succeeded() ) + .writeAttribute( "type", result.getTestMacroName() ); + + writeSourceInfo( result.getSourceInfo() ); + + m_xml.scopedElement( "Original" ) + .writeText( result.getExpression() ); + m_xml.scopedElement( "Expanded" ) + .writeText( result.getExpandedExpression() ); + } + + // And... Print a result applicable to each result type. + switch( result.getResultType() ) { + case ResultWas::ThrewException: + m_xml.startElement( "Exception" ); + writeSourceInfo( result.getSourceInfo() ); + m_xml.writeText( result.getMessage() ); + m_xml.endElement(); + break; + case ResultWas::FatalErrorCondition: + m_xml.startElement( "FatalErrorCondition" ); + writeSourceInfo( result.getSourceInfo() ); + m_xml.writeText( result.getMessage() ); + m_xml.endElement(); + break; + case ResultWas::Info: + m_xml.scopedElement( "Info" ) + .writeText( result.getMessage() ); + break; + case ResultWas::Warning: + // Warning will already have been written + break; + case ResultWas::ExplicitFailure: + m_xml.startElement( "Failure" ); + writeSourceInfo( result.getSourceInfo() ); + m_xml.writeText( result.getMessage() ); + m_xml.endElement(); + break; + default: + break; + } + + if( result.hasExpression() ) + m_xml.endElement(); + + return true; + } + + virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { + StreamingReporterBase::sectionEnded( sectionStats ); + if( --m_sectionDepth > 0 ) { + XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResults" ); + e.writeAttribute( "successes", sectionStats.assertions.passed ); + e.writeAttribute( "failures", sectionStats.assertions.failed ); + e.writeAttribute( "expectedFailures", sectionStats.assertions.failedButOk ); + + if ( m_config->showDurations() == ShowDurations::Always ) + e.writeAttribute( "durationInSeconds", sectionStats.durationInSeconds ); + + m_xml.endElement(); + } + } + + virtual void 
testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { + StreamingReporterBase::testCaseEnded( testCaseStats ); + XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResult" ); + e.writeAttribute( "success", testCaseStats.totals.assertions.allOk() ); + + if ( m_config->showDurations() == ShowDurations::Always ) + e.writeAttribute( "durationInSeconds", m_testCaseTimer.getElapsedSeconds() ); + + if( !testCaseStats.stdOut.empty() ) + m_xml.scopedElement( "StdOut" ).writeText( trim( testCaseStats.stdOut ), false ); + if( !testCaseStats.stdErr.empty() ) + m_xml.scopedElement( "StdErr" ).writeText( trim( testCaseStats.stdErr ), false ); + + m_xml.endElement(); + } + + virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { + StreamingReporterBase::testGroupEnded( testGroupStats ); + // TODO: Check testGroupStats.aborting and act accordingly. + m_xml.scopedElement( "OverallResults" ) + .writeAttribute( "successes", testGroupStats.totals.assertions.passed ) + .writeAttribute( "failures", testGroupStats.totals.assertions.failed ) + .writeAttribute( "expectedFailures", testGroupStats.totals.assertions.failedButOk ); + m_xml.endElement(); + } + + virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { + StreamingReporterBase::testRunEnded( testRunStats ); + m_xml.scopedElement( "OverallResults" ) + .writeAttribute( "successes", testRunStats.totals.assertions.passed ) + .writeAttribute( "failures", testRunStats.totals.assertions.failed ) + .writeAttribute( "expectedFailures", testRunStats.totals.assertions.failedButOk ); + m_xml.endElement(); + } + + private: + Timer m_testCaseTimer; + XmlWriter m_xml; + int m_sectionDepth; + }; + + INTERNAL_CATCH_REGISTER_REPORTER( "xml", XmlReporter ) + +} // end namespace Catch + +// #included from: ../reporters/catch_reporter_junit.hpp +#define TWOBLUECUBES_CATCH_REPORTER_JUNIT_HPP_INCLUDED + +#include + +namespace Catch { + + namespace { + std::string 
getCurrentTimestamp() { + // Beware, this is not reentrant because of backward compatibility issues + // Also, UTC only, again because of backward compatibility (%z is C++11) + time_t rawtime; + std::time(&rawtime); + const size_t timeStampSize = sizeof("2017-01-16T17:06:45Z"); + +#ifdef _MSC_VER + std::tm timeInfo = {}; + gmtime_s(&timeInfo, &rawtime); +#else + std::tm* timeInfo; + timeInfo = std::gmtime(&rawtime); +#endif + + char timeStamp[timeStampSize]; + const char * const fmt = "%Y-%m-%dT%H:%M:%SZ"; + +#ifdef _MSC_VER + std::strftime(timeStamp, timeStampSize, fmt, &timeInfo); +#else + std::strftime(timeStamp, timeStampSize, fmt, timeInfo); +#endif + return std::string(timeStamp); + } + + } + + class JunitReporter : public CumulativeReporterBase { + public: + JunitReporter( ReporterConfig const& _config ) + : CumulativeReporterBase( _config ), + xml( _config.stream() ), + m_okToFail( false ) + { + m_reporterPrefs.shouldRedirectStdOut = true; + } + + virtual ~JunitReporter() CATCH_OVERRIDE; + + static std::string getDescription() { + return "Reports test results in an XML format that looks like Ant's junitreport target"; + } + + virtual void noMatchingTestCases( std::string const& /*spec*/ ) CATCH_OVERRIDE {} + + virtual void testRunStarting( TestRunInfo const& runInfo ) CATCH_OVERRIDE { + CumulativeReporterBase::testRunStarting( runInfo ); + xml.startElement( "testsuites" ); + } + + virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { + suiteTimer.start(); + stdOutForSuite.str(""); + stdErrForSuite.str(""); + unexpectedExceptions = 0; + CumulativeReporterBase::testGroupStarting( groupInfo ); + } + + virtual void testCaseStarting( TestCaseInfo const& testCaseInfo ) CATCH_OVERRIDE { + m_okToFail = testCaseInfo.okToFail(); + } + virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { + if( assertionStats.assertionResult.getResultType() == ResultWas::ThrewException && !m_okToFail ) + unexpectedExceptions++; + 
return CumulativeReporterBase::assertionEnded( assertionStats ); + } + + virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { + stdOutForSuite << testCaseStats.stdOut; + stdErrForSuite << testCaseStats.stdErr; + CumulativeReporterBase::testCaseEnded( testCaseStats ); + } + + virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { + double suiteTime = suiteTimer.getElapsedSeconds(); + CumulativeReporterBase::testGroupEnded( testGroupStats ); + writeGroup( *m_testGroups.back(), suiteTime ); + } + + virtual void testRunEndedCumulative() CATCH_OVERRIDE { + xml.endElement(); + } + + void writeGroup( TestGroupNode const& groupNode, double suiteTime ) { + XmlWriter::ScopedElement e = xml.scopedElement( "testsuite" ); + TestGroupStats const& stats = groupNode.value; + xml.writeAttribute( "name", stats.groupInfo.name ); + xml.writeAttribute( "errors", unexpectedExceptions ); + xml.writeAttribute( "failures", stats.totals.assertions.failed-unexpectedExceptions ); + xml.writeAttribute( "tests", stats.totals.assertions.total() ); + xml.writeAttribute( "hostname", "tbd" ); // !TBD + if( m_config->showDurations() == ShowDurations::Never ) + xml.writeAttribute( "time", "" ); + else + xml.writeAttribute( "time", suiteTime ); + xml.writeAttribute( "timestamp", getCurrentTimestamp() ); + + // Write test cases + for( TestGroupNode::ChildNodes::const_iterator + it = groupNode.children.begin(), itEnd = groupNode.children.end(); + it != itEnd; + ++it ) + writeTestCase( **it ); + + xml.scopedElement( "system-out" ).writeText( trim( stdOutForSuite.str() ), false ); + xml.scopedElement( "system-err" ).writeText( trim( stdErrForSuite.str() ), false ); + } + + void writeTestCase( TestCaseNode const& testCaseNode ) { + TestCaseStats const& stats = testCaseNode.value; + + // All test cases have exactly one section - which represents the + // test case itself. 
That section may have 0-n nested sections + assert( testCaseNode.children.size() == 1 ); + SectionNode const& rootSection = *testCaseNode.children.front(); + + std::string className = stats.testInfo.className; + + if( className.empty() ) { + if( rootSection.childSections.empty() ) + className = "global"; + } + writeSection( className, "", rootSection ); + } + + void writeSection( std::string const& className, + std::string const& rootName, + SectionNode const& sectionNode ) { + std::string name = trim( sectionNode.stats.sectionInfo.name ); + if( !rootName.empty() ) + name = rootName + '/' + name; + + if( !sectionNode.assertions.empty() || + !sectionNode.stdOut.empty() || + !sectionNode.stdErr.empty() ) { + XmlWriter::ScopedElement e = xml.scopedElement( "testcase" ); + if( className.empty() ) { + xml.writeAttribute( "classname", name ); + xml.writeAttribute( "name", "root" ); + } + else { + xml.writeAttribute( "classname", className ); + xml.writeAttribute( "name", name ); + } + xml.writeAttribute( "time", Catch::toString( sectionNode.stats.durationInSeconds ) ); + + writeAssertions( sectionNode ); + + if( !sectionNode.stdOut.empty() ) + xml.scopedElement( "system-out" ).writeText( trim( sectionNode.stdOut ), false ); + if( !sectionNode.stdErr.empty() ) + xml.scopedElement( "system-err" ).writeText( trim( sectionNode.stdErr ), false ); + } + for( SectionNode::ChildSections::const_iterator + it = sectionNode.childSections.begin(), + itEnd = sectionNode.childSections.end(); + it != itEnd; + ++it ) + if( className.empty() ) + writeSection( name, "", **it ); + else + writeSection( className, name, **it ); + } + + void writeAssertions( SectionNode const& sectionNode ) { + for( SectionNode::Assertions::const_iterator + it = sectionNode.assertions.begin(), itEnd = sectionNode.assertions.end(); + it != itEnd; + ++it ) + writeAssertion( *it ); + } + void writeAssertion( AssertionStats const& stats ) { + AssertionResult const& result = stats.assertionResult; + if( 
!result.isOk() ) { + std::string elementName; + switch( result.getResultType() ) { + case ResultWas::ThrewException: + case ResultWas::FatalErrorCondition: + elementName = "error"; + break; + case ResultWas::ExplicitFailure: + elementName = "failure"; + break; + case ResultWas::ExpressionFailed: + elementName = "failure"; + break; + case ResultWas::DidntThrowException: + elementName = "failure"; + break; + + // We should never see these here: + case ResultWas::Info: + case ResultWas::Warning: + case ResultWas::Ok: + case ResultWas::Unknown: + case ResultWas::FailureBit: + case ResultWas::Exception: + elementName = "internalError"; + break; + } + + XmlWriter::ScopedElement e = xml.scopedElement( elementName ); + + xml.writeAttribute( "message", result.getExpandedExpression() ); + xml.writeAttribute( "type", result.getTestMacroName() ); + + std::ostringstream oss; + if( !result.getMessage().empty() ) + oss << result.getMessage() << '\n'; + for( std::vector::const_iterator + it = stats.infoMessages.begin(), + itEnd = stats.infoMessages.end(); + it != itEnd; + ++it ) + if( it->type == ResultWas::Info ) + oss << it->message << '\n'; + + oss << "at " << result.getSourceInfo(); + xml.writeText( oss.str(), false ); + } + } + + XmlWriter xml; + Timer suiteTimer; + std::ostringstream stdOutForSuite; + std::ostringstream stdErrForSuite; + unsigned int unexpectedExceptions; + bool m_okToFail; + }; + + INTERNAL_CATCH_REGISTER_REPORTER( "junit", JunitReporter ) + +} // end namespace Catch + +// #included from: ../reporters/catch_reporter_console.hpp +#define TWOBLUECUBES_CATCH_REPORTER_CONSOLE_HPP_INCLUDED + +#include +#include + +namespace Catch { + + struct ConsoleReporter : StreamingReporterBase { + ConsoleReporter( ReporterConfig const& _config ) + : StreamingReporterBase( _config ), + m_headerPrinted( false ) + {} + + virtual ~ConsoleReporter() CATCH_OVERRIDE; + static std::string getDescription() { + return "Reports test results as plain lines of text"; + } + + virtual 
void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { + stream << "No test cases matched '" << spec << '\'' << std::endl; + } + + virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { + } + + virtual bool assertionEnded( AssertionStats const& _assertionStats ) CATCH_OVERRIDE { + AssertionResult const& result = _assertionStats.assertionResult; + + bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); + + // Drop out if result was successful but we're not printing them. + if( !includeResults && result.getResultType() != ResultWas::Warning ) + return false; + + lazyPrint(); + + AssertionPrinter printer( stream, _assertionStats, includeResults ); + printer.print(); + stream << std::endl; + return true; + } + + virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { + m_headerPrinted = false; + StreamingReporterBase::sectionStarting( _sectionInfo ); + } + virtual void sectionEnded( SectionStats const& _sectionStats ) CATCH_OVERRIDE { + if( _sectionStats.missingAssertions ) { + lazyPrint(); + Colour colour( Colour::ResultError ); + if( m_sectionStack.size() > 1 ) + stream << "\nNo assertions in section"; + else + stream << "\nNo assertions in test case"; + stream << " '" << _sectionStats.sectionInfo.name << "'\n" << std::endl; + } + if( m_config->showDurations() == ShowDurations::Always ) { + stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; + } + if( m_headerPrinted ) { + m_headerPrinted = false; + } + StreamingReporterBase::sectionEnded( _sectionStats ); + } + + virtual void testCaseEnded( TestCaseStats const& _testCaseStats ) CATCH_OVERRIDE { + StreamingReporterBase::testCaseEnded( _testCaseStats ); + m_headerPrinted = false; + } + virtual void testGroupEnded( TestGroupStats const& _testGroupStats ) CATCH_OVERRIDE { + if( currentGroupInfo.used ) { + printSummaryDivider(); + stream << "Summary for group '" << 
_testGroupStats.groupInfo.name << "':\n"; + printTotals( _testGroupStats.totals ); + stream << '\n' << std::endl; + } + StreamingReporterBase::testGroupEnded( _testGroupStats ); + } + virtual void testRunEnded( TestRunStats const& _testRunStats ) CATCH_OVERRIDE { + printTotalsDivider( _testRunStats.totals ); + printTotals( _testRunStats.totals ); + stream << std::endl; + StreamingReporterBase::testRunEnded( _testRunStats ); + } + + private: + + class AssertionPrinter { + void operator= ( AssertionPrinter const& ); + public: + AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) + : stream( _stream ), + stats( _stats ), + result( _stats.assertionResult ), + colour( Colour::None ), + message( result.getMessage() ), + messages( _stats.infoMessages ), + printInfoMessages( _printInfoMessages ) + { + switch( result.getResultType() ) { + case ResultWas::Ok: + colour = Colour::Success; + passOrFail = "PASSED"; + //if( result.hasMessage() ) + if( _stats.infoMessages.size() == 1 ) + messageLabel = "with message"; + if( _stats.infoMessages.size() > 1 ) + messageLabel = "with messages"; + break; + case ResultWas::ExpressionFailed: + if( result.isOk() ) { + colour = Colour::Success; + passOrFail = "FAILED - but was ok"; + } + else { + colour = Colour::Error; + passOrFail = "FAILED"; + } + if( _stats.infoMessages.size() == 1 ) + messageLabel = "with message"; + if( _stats.infoMessages.size() > 1 ) + messageLabel = "with messages"; + break; + case ResultWas::ThrewException: + colour = Colour::Error; + passOrFail = "FAILED"; + messageLabel = "due to unexpected exception with "; + if (_stats.infoMessages.size() == 1) + messageLabel += "message"; + if (_stats.infoMessages.size() > 1) + messageLabel += "messages"; + break; + case ResultWas::FatalErrorCondition: + colour = Colour::Error; + passOrFail = "FAILED"; + messageLabel = "due to a fatal error condition"; + break; + case ResultWas::DidntThrowException: + colour = Colour::Error; + 
passOrFail = "FAILED"; + messageLabel = "because no exception was thrown where one was expected"; + break; + case ResultWas::Info: + messageLabel = "info"; + break; + case ResultWas::Warning: + messageLabel = "warning"; + break; + case ResultWas::ExplicitFailure: + passOrFail = "FAILED"; + colour = Colour::Error; + if( _stats.infoMessages.size() == 1 ) + messageLabel = "explicitly with message"; + if( _stats.infoMessages.size() > 1 ) + messageLabel = "explicitly with messages"; + break; + // These cases are here to prevent compiler warnings + case ResultWas::Unknown: + case ResultWas::FailureBit: + case ResultWas::Exception: + passOrFail = "** internal error **"; + colour = Colour::Error; + break; + } + } + + void print() const { + printSourceInfo(); + if( stats.totals.assertions.total() > 0 ) { + if( result.isOk() ) + stream << '\n'; + printResultType(); + printOriginalExpression(); + printReconstructedExpression(); + } + else { + stream << '\n'; + } + printMessage(); + } + + private: + void printResultType() const { + if( !passOrFail.empty() ) { + Colour colourGuard( colour ); + stream << passOrFail << ":\n"; + } + } + void printOriginalExpression() const { + if( result.hasExpression() ) { + Colour colourGuard( Colour::OriginalExpression ); + stream << " "; + stream << result.getExpressionInMacro(); + stream << '\n'; + } + } + void printReconstructedExpression() const { + if( result.hasExpandedExpression() ) { + stream << "with expansion:\n"; + Colour colourGuard( Colour::ReconstructedExpression ); + stream << Text( result.getExpandedExpression(), TextAttributes().setIndent(2) ) << '\n'; + } + } + void printMessage() const { + if( !messageLabel.empty() ) + stream << messageLabel << ':' << '\n'; + for( std::vector::const_iterator it = messages.begin(), itEnd = messages.end(); + it != itEnd; + ++it ) { + // If this assertion is a warning ignore any INFO messages + if( printInfoMessages || it->type != ResultWas::Info ) + stream << Text( it->message, 
TextAttributes().setIndent(2) ) << '\n'; + } + } + void printSourceInfo() const { + Colour colourGuard( Colour::FileName ); + stream << result.getSourceInfo() << ": "; + } + + std::ostream& stream; + AssertionStats const& stats; + AssertionResult const& result; + Colour::Code colour; + std::string passOrFail; + std::string messageLabel; + std::string message; + std::vector messages; + bool printInfoMessages; + }; + + void lazyPrint() { + + if( !currentTestRunInfo.used ) + lazyPrintRunInfo(); + if( !currentGroupInfo.used ) + lazyPrintGroupInfo(); + + if( !m_headerPrinted ) { + printTestCaseAndSectionHeader(); + m_headerPrinted = true; + } + } + void lazyPrintRunInfo() { + stream << '\n' << getLineOfChars<'~'>() << '\n'; + Colour colour( Colour::SecondaryText ); + stream << currentTestRunInfo->name + << " is a Catch v" << libraryVersion() << " host application.\n" + << "Run with -? for options\n\n"; + + if( m_config->rngSeed() != 0 ) + stream << "Randomness seeded to: " << m_config->rngSeed() << "\n\n"; + + currentTestRunInfo.used = true; + } + void lazyPrintGroupInfo() { + if( !currentGroupInfo->name.empty() && currentGroupInfo->groupsCounts > 1 ) { + printClosedHeader( "Group: " + currentGroupInfo->name ); + currentGroupInfo.used = true; + } + } + void printTestCaseAndSectionHeader() { + assert( !m_sectionStack.empty() ); + printOpenHeader( currentTestCaseInfo->name ); + + if( m_sectionStack.size() > 1 ) { + Colour colourGuard( Colour::Headers ); + + std::vector::const_iterator + it = m_sectionStack.begin()+1, // Skip first section (test case) + itEnd = m_sectionStack.end(); + for( ; it != itEnd; ++it ) + printHeaderString( it->name, 2 ); + } + + SourceLineInfo lineInfo = m_sectionStack.back().lineInfo; + + if( !lineInfo.empty() ){ + stream << getLineOfChars<'-'>() << '\n'; + Colour colourGuard( Colour::FileName ); + stream << lineInfo << '\n'; + } + stream << getLineOfChars<'.'>() << '\n' << std::endl; + } + + void printClosedHeader( std::string const& _name ) { + 
printOpenHeader( _name ); + stream << getLineOfChars<'.'>() << '\n'; + } + void printOpenHeader( std::string const& _name ) { + stream << getLineOfChars<'-'>() << '\n'; + { + Colour colourGuard( Colour::Headers ); + printHeaderString( _name ); + } + } + + // if string has a : in first line will set indent to follow it on + // subsequent lines + void printHeaderString( std::string const& _string, std::size_t indent = 0 ) { + std::size_t i = _string.find( ": " ); + if( i != std::string::npos ) + i+=2; + else + i = 0; + stream << Text( _string, TextAttributes() + .setIndent( indent+i) + .setInitialIndent( indent ) ) << '\n'; + } + + struct SummaryColumn { + + SummaryColumn( std::string const& _label, Colour::Code _colour ) + : label( _label ), + colour( _colour ) + {} + SummaryColumn addRow( std::size_t count ) { + std::ostringstream oss; + oss << count; + std::string row = oss.str(); + for( std::vector::iterator it = rows.begin(); it != rows.end(); ++it ) { + while( it->size() < row.size() ) + *it = ' ' + *it; + while( it->size() > row.size() ) + row = ' ' + row; + } + rows.push_back( row ); + return *this; + } + + std::string label; + Colour::Code colour; + std::vector rows; + + }; + + void printTotals( Totals const& totals ) { + if( totals.testCases.total() == 0 ) { + stream << Colour( Colour::Warning ) << "No tests ran\n"; + } + else if( totals.assertions.total() > 0 && totals.testCases.allPassed() ) { + stream << Colour( Colour::ResultSuccess ) << "All tests passed"; + stream << " (" + << pluralise( totals.assertions.passed, "assertion" ) << " in " + << pluralise( totals.testCases.passed, "test case" ) << ')' + << '\n'; + } + else { + + std::vector columns; + columns.push_back( SummaryColumn( "", Colour::None ) + .addRow( totals.testCases.total() ) + .addRow( totals.assertions.total() ) ); + columns.push_back( SummaryColumn( "passed", Colour::Success ) + .addRow( totals.testCases.passed ) + .addRow( totals.assertions.passed ) ); + columns.push_back( 
SummaryColumn( "failed", Colour::ResultError ) + .addRow( totals.testCases.failed ) + .addRow( totals.assertions.failed ) ); + columns.push_back( SummaryColumn( "failed as expected", Colour::ResultExpectedFailure ) + .addRow( totals.testCases.failedButOk ) + .addRow( totals.assertions.failedButOk ) ); + + printSummaryRow( "test cases", columns, 0 ); + printSummaryRow( "assertions", columns, 1 ); + } + } + void printSummaryRow( std::string const& label, std::vector const& cols, std::size_t row ) { + for( std::vector::const_iterator it = cols.begin(); it != cols.end(); ++it ) { + std::string value = it->rows[row]; + if( it->label.empty() ) { + stream << label << ": "; + if( value != "0" ) + stream << value; + else + stream << Colour( Colour::Warning ) << "- none -"; + } + else if( value != "0" ) { + stream << Colour( Colour::LightGrey ) << " | "; + stream << Colour( it->colour ) + << value << ' ' << it->label; + } + } + stream << '\n'; + } + + static std::size_t makeRatio( std::size_t number, std::size_t total ) { + std::size_t ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number/ total : 0; + return ( ratio == 0 && number > 0 ) ? 
1 : ratio; + } + static std::size_t& findMax( std::size_t& i, std::size_t& j, std::size_t& k ) { + if( i > j && i > k ) + return i; + else if( j > k ) + return j; + else + return k; + } + + void printTotalsDivider( Totals const& totals ) { + if( totals.testCases.total() > 0 ) { + std::size_t failedRatio = makeRatio( totals.testCases.failed, totals.testCases.total() ); + std::size_t failedButOkRatio = makeRatio( totals.testCases.failedButOk, totals.testCases.total() ); + std::size_t passedRatio = makeRatio( totals.testCases.passed, totals.testCases.total() ); + while( failedRatio + failedButOkRatio + passedRatio < CATCH_CONFIG_CONSOLE_WIDTH-1 ) + findMax( failedRatio, failedButOkRatio, passedRatio )++; + while( failedRatio + failedButOkRatio + passedRatio > CATCH_CONFIG_CONSOLE_WIDTH-1 ) + findMax( failedRatio, failedButOkRatio, passedRatio )--; + + stream << Colour( Colour::Error ) << std::string( failedRatio, '=' ); + stream << Colour( Colour::ResultExpectedFailure ) << std::string( failedButOkRatio, '=' ); + if( totals.testCases.allPassed() ) + stream << Colour( Colour::ResultSuccess ) << std::string( passedRatio, '=' ); + else + stream << Colour( Colour::Success ) << std::string( passedRatio, '=' ); + } + else { + stream << Colour( Colour::Warning ) << std::string( CATCH_CONFIG_CONSOLE_WIDTH-1, '=' ); + } + stream << '\n'; + } + void printSummaryDivider() { + stream << getLineOfChars<'-'>() << '\n'; + } + + private: + bool m_headerPrinted; + }; + + INTERNAL_CATCH_REGISTER_REPORTER( "console", ConsoleReporter ) + +} // end namespace Catch + +// #included from: ../reporters/catch_reporter_compact.hpp +#define TWOBLUECUBES_CATCH_REPORTER_COMPACT_HPP_INCLUDED + +namespace Catch { + + struct CompactReporter : StreamingReporterBase { + + CompactReporter( ReporterConfig const& _config ) + : StreamingReporterBase( _config ) + {} + + virtual ~CompactReporter(); + + static std::string getDescription() { + return "Reports test results on a single line, suitable for IDEs"; 
+ } + + virtual ReporterPreferences getPreferences() const { + ReporterPreferences prefs; + prefs.shouldRedirectStdOut = false; + return prefs; + } + + virtual void noMatchingTestCases( std::string const& spec ) { + stream << "No test cases matched '" << spec << '\'' << std::endl; + } + + virtual void assertionStarting( AssertionInfo const& ) {} + + virtual bool assertionEnded( AssertionStats const& _assertionStats ) { + AssertionResult const& result = _assertionStats.assertionResult; + + bool printInfoMessages = true; + + // Drop out if result was successful and we're not printing those + if( !m_config->includeSuccessfulResults() && result.isOk() ) { + if( result.getResultType() != ResultWas::Warning ) + return false; + printInfoMessages = false; + } + + AssertionPrinter printer( stream, _assertionStats, printInfoMessages ); + printer.print(); + + stream << std::endl; + return true; + } + + virtual void sectionEnded(SectionStats const& _sectionStats) CATCH_OVERRIDE { + if (m_config->showDurations() == ShowDurations::Always) { + stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; + } + } + + virtual void testRunEnded( TestRunStats const& _testRunStats ) { + printTotals( _testRunStats.totals ); + stream << '\n' << std::endl; + StreamingReporterBase::testRunEnded( _testRunStats ); + } + + private: + class AssertionPrinter { + void operator= ( AssertionPrinter const& ); + public: + AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) + : stream( _stream ) + , stats( _stats ) + , result( _stats.assertionResult ) + , messages( _stats.infoMessages ) + , itMessage( _stats.infoMessages.begin() ) + , printInfoMessages( _printInfoMessages ) + {} + + void print() { + printSourceInfo(); + + itMessage = messages.begin(); + + switch( result.getResultType() ) { + case ResultWas::Ok: + printResultType( Colour::ResultSuccess, passedString() ); + 
printOriginalExpression(); + printReconstructedExpression(); + if ( ! result.hasExpression() ) + printRemainingMessages( Colour::None ); + else + printRemainingMessages(); + break; + case ResultWas::ExpressionFailed: + if( result.isOk() ) + printResultType( Colour::ResultSuccess, failedString() + std::string( " - but was ok" ) ); + else + printResultType( Colour::Error, failedString() ); + printOriginalExpression(); + printReconstructedExpression(); + printRemainingMessages(); + break; + case ResultWas::ThrewException: + printResultType( Colour::Error, failedString() ); + printIssue( "unexpected exception with message:" ); + printMessage(); + printExpressionWas(); + printRemainingMessages(); + break; + case ResultWas::FatalErrorCondition: + printResultType( Colour::Error, failedString() ); + printIssue( "fatal error condition with message:" ); + printMessage(); + printExpressionWas(); + printRemainingMessages(); + break; + case ResultWas::DidntThrowException: + printResultType( Colour::Error, failedString() ); + printIssue( "expected exception, got none" ); + printExpressionWas(); + printRemainingMessages(); + break; + case ResultWas::Info: + printResultType( Colour::None, "info" ); + printMessage(); + printRemainingMessages(); + break; + case ResultWas::Warning: + printResultType( Colour::None, "warning" ); + printMessage(); + printRemainingMessages(); + break; + case ResultWas::ExplicitFailure: + printResultType( Colour::Error, failedString() ); + printIssue( "explicitly" ); + printRemainingMessages( Colour::None ); + break; + // These cases are here to prevent compiler warnings + case ResultWas::Unknown: + case ResultWas::FailureBit: + case ResultWas::Exception: + printResultType( Colour::Error, "** internal error **" ); + break; + } + } + + private: + // Colour::LightGrey + + static Colour::Code dimColour() { return Colour::FileName; } + +#ifdef CATCH_PLATFORM_MAC + static const char* failedString() { return "FAILED"; } + static const char* passedString() { 
return "PASSED"; } +#else + static const char* failedString() { return "failed"; } + static const char* passedString() { return "passed"; } +#endif + + void printSourceInfo() const { + Colour colourGuard( Colour::FileName ); + stream << result.getSourceInfo() << ':'; + } + + void printResultType( Colour::Code colour, std::string const& passOrFail ) const { + if( !passOrFail.empty() ) { + { + Colour colourGuard( colour ); + stream << ' ' << passOrFail; + } + stream << ':'; + } + } + + void printIssue( std::string const& issue ) const { + stream << ' ' << issue; + } + + void printExpressionWas() { + if( result.hasExpression() ) { + stream << ';'; + { + Colour colour( dimColour() ); + stream << " expression was:"; + } + printOriginalExpression(); + } + } + + void printOriginalExpression() const { + if( result.hasExpression() ) { + stream << ' ' << result.getExpression(); + } + } + + void printReconstructedExpression() const { + if( result.hasExpandedExpression() ) { + { + Colour colour( dimColour() ); + stream << " for: "; + } + stream << result.getExpandedExpression(); + } + } + + void printMessage() { + if ( itMessage != messages.end() ) { + stream << " '" << itMessage->message << '\''; + ++itMessage; + } + } + + void printRemainingMessages( Colour::Code colour = dimColour() ) { + if ( itMessage == messages.end() ) + return; + + // using messages.end() directly yields compilation error: + std::vector::const_iterator itEnd = messages.end(); + const std::size_t N = static_cast( std::distance( itMessage, itEnd ) ); + + { + Colour colourGuard( colour ); + stream << " with " << pluralise( N, "message" ) << ':'; + } + + for(; itMessage != itEnd; ) { + // If this assertion is a warning ignore any INFO messages + if( printInfoMessages || itMessage->type != ResultWas::Info ) { + stream << " '" << itMessage->message << '\''; + if ( ++itMessage != itEnd ) { + Colour colourGuard( dimColour() ); + stream << " and"; + } + } + } + } + + private: + std::ostream& stream; + 
AssertionStats const& stats; + AssertionResult const& result; + std::vector messages; + std::vector::const_iterator itMessage; + bool printInfoMessages; + }; + + // Colour, message variants: + // - white: No tests ran. + // - red: Failed [both/all] N test cases, failed [both/all] M assertions. + // - white: Passed [both/all] N test cases (no assertions). + // - red: Failed N tests cases, failed M assertions. + // - green: Passed [both/all] N tests cases with M assertions. + + std::string bothOrAll( std::size_t count ) const { + return count == 1 ? std::string() : count == 2 ? "both " : "all " ; + } + + void printTotals( const Totals& totals ) const { + if( totals.testCases.total() == 0 ) { + stream << "No tests ran."; + } + else if( totals.testCases.failed == totals.testCases.total() ) { + Colour colour( Colour::ResultError ); + const std::string qualify_assertions_failed = + totals.assertions.failed == totals.assertions.total() ? + bothOrAll( totals.assertions.failed ) : std::string(); + stream << + "Failed " << bothOrAll( totals.testCases.failed ) + << pluralise( totals.testCases.failed, "test case" ) << ", " + "failed " << qualify_assertions_failed << + pluralise( totals.assertions.failed, "assertion" ) << '.'; + } + else if( totals.assertions.total() == 0 ) { + stream << + "Passed " << bothOrAll( totals.testCases.total() ) + << pluralise( totals.testCases.total(), "test case" ) + << " (no assertions)."; + } + else if( totals.assertions.failed ) { + Colour colour( Colour::ResultError ); + stream << + "Failed " << pluralise( totals.testCases.failed, "test case" ) << ", " + "failed " << pluralise( totals.assertions.failed, "assertion" ) << '.'; + } + else { + Colour colour( Colour::ResultSuccess ); + stream << + "Passed " << bothOrAll( totals.testCases.passed ) + << pluralise( totals.testCases.passed, "test case" ) << + " with " << pluralise( totals.assertions.passed, "assertion" ) << '.'; + } + } + }; + + INTERNAL_CATCH_REGISTER_REPORTER( "compact", 
CompactReporter ) + +} // end namespace Catch + +namespace Catch { + // These are all here to avoid warnings about not having any out of line + // virtual methods + NonCopyable::~NonCopyable() {} + IShared::~IShared() {} + IStream::~IStream() CATCH_NOEXCEPT {} + FileStream::~FileStream() CATCH_NOEXCEPT {} + CoutStream::~CoutStream() CATCH_NOEXCEPT {} + DebugOutStream::~DebugOutStream() CATCH_NOEXCEPT {} + StreamBufBase::~StreamBufBase() CATCH_NOEXCEPT {} + IContext::~IContext() {} + IResultCapture::~IResultCapture() {} + ITestCase::~ITestCase() {} + ITestCaseRegistry::~ITestCaseRegistry() {} + IRegistryHub::~IRegistryHub() {} + IMutableRegistryHub::~IMutableRegistryHub() {} + IExceptionTranslator::~IExceptionTranslator() {} + IExceptionTranslatorRegistry::~IExceptionTranslatorRegistry() {} + IReporter::~IReporter() {} + IReporterFactory::~IReporterFactory() {} + IReporterRegistry::~IReporterRegistry() {} + IStreamingReporter::~IStreamingReporter() {} + AssertionStats::~AssertionStats() {} + SectionStats::~SectionStats() {} + TestCaseStats::~TestCaseStats() {} + TestGroupStats::~TestGroupStats() {} + TestRunStats::~TestRunStats() {} + CumulativeReporterBase::SectionNode::~SectionNode() {} + CumulativeReporterBase::~CumulativeReporterBase() {} + + StreamingReporterBase::~StreamingReporterBase() {} + ConsoleReporter::~ConsoleReporter() {} + CompactReporter::~CompactReporter() {} + IRunner::~IRunner() {} + IMutableContext::~IMutableContext() {} + IConfig::~IConfig() {} + XmlReporter::~XmlReporter() {} + JunitReporter::~JunitReporter() {} + TestRegistry::~TestRegistry() {} + FreeFunctionTestCase::~FreeFunctionTestCase() {} + IGeneratorInfo::~IGeneratorInfo() {} + IGeneratorsForTest::~IGeneratorsForTest() {} + WildcardPattern::~WildcardPattern() {} + TestSpec::Pattern::~Pattern() {} + TestSpec::NamePattern::~NamePattern() {} + TestSpec::TagPattern::~TagPattern() {} + TestSpec::ExcludedPattern::~ExcludedPattern() {} + 
Matchers::Impl::MatcherUntypedBase::~MatcherUntypedBase() {} + + void Config::dummy() {} + + namespace TestCaseTracking { + ITracker::~ITracker() {} + TrackerBase::~TrackerBase() {} + SectionTracker::~SectionTracker() {} + IndexTracker::~IndexTracker() {} + } +} + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#endif + +#ifdef CATCH_CONFIG_MAIN +// #included from: internal/catch_default_main.hpp +#define TWOBLUECUBES_CATCH_DEFAULT_MAIN_HPP_INCLUDED + +#ifndef __OBJC__ + +#if defined(WIN32) && defined(_UNICODE) && !defined(DO_NOT_USE_WMAIN) +// Standard C/C++ Win32 Unicode wmain entry point +extern "C" int wmain (int argc, wchar_t * argv[], wchar_t * []) { +#else +// Standard C/C++ main entry point +int main (int argc, char * argv[]) { +#endif + + int result = Catch::Session().run( argc, argv ); + return ( result < 0xff ? result : 0xff ); +} + +#else // __OBJC__ + +// Objective-C entry point +int main (int argc, char * const argv[]) { +#if !CATCH_ARC_ENABLED + NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; +#endif + + Catch::registerTestMethods(); + int result = Catch::Session().run( argc, (char* const*)argv ); + +#if !CATCH_ARC_ENABLED + [pool drain]; +#endif + + return ( result < 0xff ? 
result : 0xff ); +} + +#endif // __OBJC__ + +#endif + +#ifdef CLARA_CONFIG_MAIN_NOT_DEFINED +# undef CLARA_CONFIG_MAIN +#endif + +////// + +// If this config identifier is defined then all CATCH macros are prefixed with CATCH_ +#ifdef CATCH_CONFIG_PREFIX_ALL + +#if defined(CATCH_CONFIG_FAST_COMPILE) +#define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr ) +#define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) +#else +#define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr ) +#define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) +#endif + +#define CATCH_REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr ) +#define CATCH_REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr ) +#define CATCH_REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr ) +#define CATCH_REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr ) + +#define CATCH_CHECK( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK", Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CATCH_CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr ) +#define CATCH_CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CATCH_CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CATCH_CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CATCH_CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr ) 
+#define CATCH_CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr ) + +#define CATCH_CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr ) +#define CATCH_CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CATCH_CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr ) +#define CATCH_CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr ) + +#define CATCH_CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg ) + +#if defined(CATCH_CONFIG_FAST_COMPILE) +#define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) +#else +#define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) +#endif + +#define CATCH_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg ) +#define CATCH_WARN( msg ) INTERNAL_CATCH_MSG( "CATCH_WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg ) +#define CATCH_SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg ) +#define CATCH_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) ) +#define CATCH_SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) ) + +#ifdef CATCH_CONFIG_VARIADIC_MACROS + #define CATCH_TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ ) + #define CATCH_TEST_CASE_METHOD( className, ... 
) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ ) + #define CATCH_METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ ) + #define CATCH_REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ ) + #define CATCH_SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ ) + #define CATCH_FAIL( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ ) + #define CATCH_FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) + #define CATCH_SUCCEED( ... ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) +#else + #define CATCH_TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description ) + #define CATCH_TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description ) + #define CATCH_METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description ) + #define CATCH_REGISTER_TEST_CASE( function, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( function, name, description ) + #define CATCH_SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description ) + #define CATCH_FAIL( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg ) + #define CATCH_FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg ) + #define CATCH_SUCCEED( msg ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg ) +#endif +#define CATCH_ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" ) + +#define CATCH_REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, 
reporterType ) +#define CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) + +#define CATCH_GENERATE( expr) INTERNAL_CATCH_GENERATE( expr ) + +// "BDD-style" convenience wrappers +#ifdef CATCH_CONFIG_VARIADIC_MACROS +#define CATCH_SCENARIO( ... ) CATCH_TEST_CASE( "Scenario: " __VA_ARGS__ ) +#define CATCH_SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ ) +#else +#define CATCH_SCENARIO( name, tags ) CATCH_TEST_CASE( "Scenario: " name, tags ) +#define CATCH_SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags ) +#endif +#define CATCH_GIVEN( desc ) CATCH_SECTION( std::string( "Given: ") + desc, "" ) +#define CATCH_WHEN( desc ) CATCH_SECTION( std::string( " When: ") + desc, "" ) +#define CATCH_AND_WHEN( desc ) CATCH_SECTION( std::string( " And: ") + desc, "" ) +#define CATCH_THEN( desc ) CATCH_SECTION( std::string( " Then: ") + desc, "" ) +#define CATCH_AND_THEN( desc ) CATCH_SECTION( std::string( " And: ") + desc, "" ) + +// If CATCH_CONFIG_PREFIX_ALL is not defined then the CATCH_ prefix is not required +#else + +#if defined(CATCH_CONFIG_FAST_COMPILE) +#define REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE", Catch::ResultDisposition::Normal, expr ) +#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) + +#else +#define REQUIRE( expr ) INTERNAL_CATCH_TEST( "REQUIRE", Catch::ResultDisposition::Normal, expr ) +#define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) +#endif + +#define REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr ) +#define REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "REQUIRE_THROWS_AS", exceptionType, 
Catch::ResultDisposition::Normal, expr ) +#define REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr ) +#define REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr ) + +#define CHECK( expr ) INTERNAL_CATCH_TEST( "CHECK", Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr ) +#define CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr ) + +#define CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr ) +#define CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr ) +#define CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr ) +#define CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr ) + +#define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg ) + +#if defined(CATCH_CONFIG_FAST_COMPILE) +#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) +#else +#define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) +#endif + +#define INFO( msg ) 
INTERNAL_CATCH_INFO( "INFO", msg ) +#define WARN( msg ) INTERNAL_CATCH_MSG( "WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg ) +#define SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg ) +#define CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) ) +#define SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) ) + +#ifdef CATCH_CONFIG_VARIADIC_MACROS +#define TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ ) +#define TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ ) +#define METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ ) +#define REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ ) +#define SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ ) +#define FAIL( ... ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ ) +#define FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) +#define SUCCEED( ... 
) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) +#else +#define TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description ) + #define TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description ) + #define METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description ) + #define REGISTER_TEST_CASE( method, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( method, name, description ) + #define SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description ) + #define FAIL( msg ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg ) + #define FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg ) + #define SUCCEED( msg ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg ) +#endif +#define ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" ) + +#define REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) +#define REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) + +#define GENERATE( expr) INTERNAL_CATCH_GENERATE( expr ) + +#endif + +#define CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) + +// "BDD-style" convenience wrappers +#ifdef CATCH_CONFIG_VARIADIC_MACROS +#define SCENARIO( ... ) TEST_CASE( "Scenario: " __VA_ARGS__ ) +#define SCENARIO_METHOD( className, ... 
) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ ) +#else +#define SCENARIO( name, tags ) TEST_CASE( "Scenario: " name, tags ) +#define SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags ) +#endif +#define GIVEN( desc ) SECTION( std::string(" Given: ") + desc, "" ) +#define WHEN( desc ) SECTION( std::string(" When: ") + desc, "" ) +#define AND_WHEN( desc ) SECTION( std::string("And when: ") + desc, "" ) +#define THEN( desc ) SECTION( std::string(" Then: ") + desc, "" ) +#define AND_THEN( desc ) SECTION( std::string(" And: ") + desc, "" ) + +using Catch::Detail::Approx; + +#endif // TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED + diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests.cc new file mode 100644 index 000000000..0b8c8378e --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests.cc @@ -0,0 +1,339 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "../catch/catch.hpp" +#include +#include +#include "t_netcore_generator_functional_tests_helpers.h" + +TEST_CASE( "t_netcore_generator should generate valid enum", "[functional]" ) +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + + std::pair pair = TestDataGenerator::get_test_enum_data(program); + string expected_result = pair.first; + t_enum* test_enum = pair.second; + + string file_path = test_enum->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_enum(out, test_enum)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete test_enum; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid void", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + + std::pair pair = TestDataGenerator::get_test_void_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_THROWS(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + delete const_; + delete gen; + delete program; +} 
+ +TEST_CASE("t_netcore_generator should generate valid string with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_string_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete const_; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid bool with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_bool_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result 
== actual_result); + + delete const_; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid sbyte (i8) with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_i8_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete const_; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid short (i16) with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_i16_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), 
(std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete const_; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid integer (i32) with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_i32_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete const_; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid long (i64) with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_i64_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + 
std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete const_; + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should generate valid double with escaping keyword", "[functional]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + + std::pair pair = TestDataGenerator::get_test_double_const_data(gen); + string expected_result = pair.first; + t_const* const_ = pair.second; + vector consts_; + consts_.push_back(const_); + + string file_path = const_->get_name() + ".cs"; + ofstream out; + out.open(file_path.c_str()); + + REQUIRE_NOTHROW(gen->generate_consts(out, consts_)); + + out.close(); + + std::ifstream ifs(file_path); + string actual_result((std::istreambuf_iterator(ifs)), (std::istreambuf_iterator())); + std::remove(file_path.c_str()); + + REQUIRE(expected_result == actual_result); + + delete const_; + delete gen; + delete program; +} diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.cc new file mode 100644 index 000000000..92c170bb9 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.cc @@ -0,0 +1,237 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, 
Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include +#include "thrift/common.h" +#include +#include "t_netcore_generator_functional_tests_helpers.h" + +const string TestDataGenerator::DEFAULT_FILE_HEADER = "/**" "\n" + " * Autogenerated by Thrift Compiler ()" "\n" + " *" "\n" + " * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING" "\n" + " * @generated" "\n" + " */"; + +std::pair TestDataGenerator::get_test_enum_data(t_program* program) +{ + string expected_result = DEFAULT_FILE_HEADER + + "\n" + "\n" + "/// \n" + "/// TestDoc\n" + "/// \n" + "public enum TestName\n" + "{\n" + " None = 0,\n" + " First = 1,\n" + " Second = 2,\n" + "}\n"; + + t_enum* enum_ = new t_enum(program); + enum_->set_name("TestName"); + enum_->set_doc("TestDoc"); + enum_->append(new t_enum_value("None", 0)); + enum_->append(new t_enum_value("First", 1)); + enum_->append(new t_enum_value("Second", 2)); + + return std::pair(expected_result, enum_); +} + +std::pair TestDataGenerator::get_test_void_const_data(t_netcore_generator* gen) +{ + string expected_result = DEFAULT_FILE_HEADER; + + t_type* type_ = new t_base_type("void", t_base_type::TYPE_VOID); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_string("VoidValue"); + + t_const* const_ = new t_const(type_, "void", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_string_const_data(t_netcore_generator* gen) +{ 
+ string expected_result = DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const string @string = \"StringValue\";\n" + "}\n"; + + t_type* type_ = new t_base_type("string", t_base_type::TYPE_STRING); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_string("StringValue"); + + t_const* const_ = new t_const(type_, "string", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_bool_const_data(t_netcore_generator* gen) +{ + string expected_result = DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const bool @bool = true;\n" + "}\n"; + + t_type* type_ = new t_base_type("bool", t_base_type::TYPE_BOOL); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_integer(1); + + t_const* const_ = new t_const(type_, "bool", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_i8_const_data(t_netcore_generator* gen) +{ + string expected_result = DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const sbyte @sbyte = 127;\n" + "}\n"; + + t_type* type_ = new t_base_type("I8", t_base_type::TYPE_I8); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_integer(127); + + t_const* const_ = new t_const(type_, "sbyte", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_i16_const_data(t_netcore_generator* gen) +{ + string expected_result = 
DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const short @short = 32767;\n" + "}\n"; + + t_type* type_ = new t_base_type("i16", t_base_type::TYPE_I16); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_integer(32767); + + t_const* const_ = new t_const(type_, "short", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_i32_const_data(t_netcore_generator* gen) +{ + string expected_result = DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const int @int = 2147483647;\n" + "}\n"; + + t_type* type_ = new t_base_type("i32", t_base_type::TYPE_I32); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_integer(2147483647); + + t_const* const_ = new t_const(type_, "int", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_i64_const_data(t_netcore_generator* gen) +{ + string expected_result = DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const long @long = 9223372036854775807;\n" + "}\n"; + + t_type* type_ = new t_base_type("i64", t_base_type::TYPE_I64); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_integer(9223372036854775807); + + t_const* const_ = new t_const(type_, "long", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} + +std::pair TestDataGenerator::get_test_double_const_data(t_netcore_generator* gen) +{ + string expected_result = 
DEFAULT_FILE_HEADER + "\n" +gen->netcore_type_usings() + + "\n" + "public static class netcoreConstants\n" + "{\n" + " /// \n" + " /// TestDoc\n" + " /// \n" + " public const double @double = 9.22337e+18;\n" + "}\n"; + + t_type* type_ = new t_base_type("double", t_base_type::TYPE_DOUBLE); + type_->set_doc("TestDoc"); + + t_const_value* const_value_ = new t_const_value(); + const_value_->set_double(9223372036854775807.1); + + t_const* const_ = new t_const(type_, "double", const_value_); + const_->set_doc("TestDoc"); + + return std::pair(expected_result, const_); +} diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.h b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.h new file mode 100644 index 000000000..c6eaac22c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_functional_tests_helpers.h @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include + +class TestDataGenerator +{ +public: + static const string DEFAULT_FILE_HEADER; + + static std::pair get_test_enum_data(t_program* program); + static std::pair get_test_void_const_data(t_netcore_generator* gen); + static std::pair get_test_string_const_data(t_netcore_generator* gen); + static std::pair get_test_bool_const_data(t_netcore_generator* gen); + static std::pair get_test_i8_const_data(t_netcore_generator* gen); + static std::pair get_test_i16_const_data(t_netcore_generator* gen); + static std::pair get_test_i32_const_data(t_netcore_generator* gen); + static std::pair get_test_i64_const_data(t_netcore_generator* gen); + static std::pair get_test_double_const_data(t_netcore_generator* gen); +}; diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_helpers_tests.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_helpers_tests.cc new file mode 100644 index 000000000..0bcbeed19 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_helpers_tests.cc @@ -0,0 +1,209 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "../catch/catch.hpp" +#include +#include + +using std::vector; + +TEST_CASE("t_netcore_generator::netcore_type_usings() without option wcf should return valid namespaces", "[helpers]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "union", "union" } }; + string option_string = ""; + + string expected_namespaces = "using System;\n" + "using System.Collections;\n" + "using System.Collections.Generic;\n" + "using System.Text;\n" + "using System.IO;\n" + "using System.Threading;\n" + "using System.Threading.Tasks;\n" + "using Thrift;\n" + "using Thrift.Collections;\n" + endl; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + + REQUIRE_FALSE(gen->is_wcf_enabled()); + REQUIRE(gen->netcore_type_usings() == expected_namespaces); + + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator::netcore_type_usings() with option wcf should return valid namespaces", "[helpers]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + + string expected_namespaces_wcf = "using System;\n" + "using System.Collections;\n" + "using System.Collections.Generic;\n" + "using System.Text;\n" + "using System.IO;\n" + "using System.Threading;\n" + "using System.Threading.Tasks;\n" + "using Thrift;\n" + "using Thrift.Collections;\n" + "using System.ServiceModel;\n" + "using System.Runtime.Serialization;\n" + endl; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + + REQUIRE(gen->is_wcf_enabled()); + REQUIRE(gen->netcore_type_usings() == expected_namespaces_wcf); + + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should contains latest C# keywords to normalize with @", "[helpers]") +{ + string path = "CassandraTest.thrift"; 
+ string name = "netcore"; + map parsed_options = { { "wcf", "wcf" } }; + string option_string = ""; + vector current_keywords = { + { "abstract" }, + { "as" }, + { "base" }, + { "bool" }, + { "break" }, + { "byte" }, + { "case" }, + { "catch" }, + { "char" }, + { "checked" }, + { "class" }, + { "const" }, + { "continue" }, + { "decimal" }, + { "default" }, + { "delegate" }, + { "do" }, + { "double" }, + { "else" }, + { "enum" }, + { "event" }, + { "explicit" }, + { "extern" }, + { "false" }, + { "finally" }, + { "fixed" }, + { "float" }, + { "for" }, + { "foreach" }, + { "goto" }, + { "if" }, + { "implicit" }, + { "in" }, + { "int" }, + { "interface" }, + { "internal" }, + { "is" }, + { "lock" }, + { "long" }, + { "namespace" }, + { "new" }, + { "null" }, + { "object" }, + { "operator" }, + { "out" }, + { "override" }, + { "params" }, + { "private" }, + { "protected" }, + { "public" }, + { "readonly" }, + { "ref" }, + { "return" }, + { "sbyte" }, + { "sealed" }, + { "short" }, + { "sizeof" }, + { "stackalloc" }, + { "static" }, + { "string" }, + { "struct" }, + { "switch" }, + { "this" }, + { "throw" }, + { "true" }, + { "try" }, + { "typeof" }, + { "uint" }, + { "ulong" }, + { "unchecked" }, + { "unsafe" }, + { "ushort" }, + { "using" }, + { "void" }, + { "volatile" }, + { "while" }, + // Contextual Keywords + { "add" }, + { "alias" }, + { "ascending" }, + { "async" }, + { "await" }, + { "descending" }, + { "dynamic" }, + { "from" }, + { "get" }, + { "global" }, + { "group" }, + { "into" }, + { "join" }, + { "let" }, + { "orderby" }, + { "partial" }, + { "remove" }, + { "select" }, + { "set" }, + { "value" }, + { "var" }, + { "when" }, + { "where" }, + { "yield" } + }; + + string missed_keywords = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + gen->init_generator(); + map generators_keywords = gen->get_keywords_list(); + + for (vector::iterator it = 
current_keywords.begin(); it != current_keywords.end(); ++it) + { + if (generators_keywords.find(*it) == generators_keywords.end()) + { + missed_keywords = missed_keywords + *it + ","; + } + } + + REQUIRE(missed_keywords == ""); + + delete gen; + delete program; +} diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_initialization_tests.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_initialization_tests.cc new file mode 100644 index 000000000..ec17733bd --- /dev/null +++ b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/netcore/t_netcore_generator_initialization_tests.cc @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "../catch/catch.hpp" +#include +#include + +TEST_CASE( "t_netcore_generator should throw error with unknown options", "[initialization]" ) +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "keys", "keys" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = nullptr; + + REQUIRE_THROWS(gen = new t_netcore_generator(program, parsed_options, option_string)); + + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should create valid instance with valid options", "[initialization]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" }, { "nullable", "nullable"} }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = nullptr; + + REQUIRE_NOTHROW(gen = new t_netcore_generator(program, parsed_options, option_string)); + REQUIRE(gen != nullptr); + REQUIRE(gen->is_wcf_enabled()); + REQUIRE(gen->is_nullable_enabled()); + REQUIRE_FALSE(gen->is_hashcode_enabled()); + REQUIRE_FALSE(gen->is_serialize_enabled()); + REQUIRE_FALSE(gen->is_union_enabled()); + + delete gen; + delete program; +} + +TEST_CASE("t_netcore_generator should pass init succesfully", "[initialization]") +{ + string path = "CassandraTest.thrift"; + string name = "netcore"; + map parsed_options = { { "wcf", "wcf" },{ "nullable", "nullable" } }; + string option_string = ""; + + t_program* program = new t_program(path, name); + t_netcore_generator* gen = new t_netcore_generator(program, parsed_options, option_string); + + REQUIRE_NOTHROW(gen->init_generator()); + + delete gen; + delete program; +} diff --git a/vendor/git.apache.org/thrift.git/compiler/cpp/tests/tests_main.cc b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/tests_main.cc new file mode 100644 index 000000000..21d09b9f0 --- /dev/null +++ 
b/vendor/git.apache.org/thrift.git/compiler/cpp/tests/tests_main.cc @@ -0,0 +1,19 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#define CATCH_CONFIG_MAIN +#include "catch/catch.hpp" diff --git a/vendor/git.apache.org/thrift.git/composer.json b/vendor/git.apache.org/thrift.git/composer.json index 9d6454ee1..9a3833161 100644 --- a/vendor/git.apache.org/thrift.git/composer.json +++ b/vendor/git.apache.org/thrift.git/composer.json @@ -16,15 +16,22 @@ "issues": "https://issues.apache.org/jira/browse/THRIFT" }, "require": { - "php": ">=5.3.0" + "php": "^5.5 || ^7.0" + }, + "require-dev": { + "phpunit/phpunit": "~4.8.36", + "squizlabs/php_codesniffer": "3.*" }, "autoload": { - "psr-0": {"Thrift": "lib/php/lib/"} + "psr-4": {"Thrift\\": "lib/php/lib/"} + }, + "autoload-dev": { + "psr-4": {"Test\\Thrift\\": "lib/php/test/"} }, "minimum-stability": "dev", "extra": { "branch-alias": { - "dev-master": "0.11.0" + "dev-master": "1.0.0-dev" } } } diff --git a/vendor/git.apache.org/thrift.git/configure.ac b/vendor/git.apache.org/thrift.git/configure.ac index 6a7a1a587..917f6fa22 100755 --- a/vendor/git.apache.org/thrift.git/configure.ac +++ b/vendor/git.apache.org/thrift.git/configure.ac @@ -20,7 +20,7 @@ AC_PREREQ(2.65) 
AC_CONFIG_MACRO_DIR([./aclocal]) -AC_INIT([thrift], [0.11.0]) +AC_INIT([thrift], [1.0.0-dev]) AC_CONFIG_AUX_DIR([.]) @@ -119,6 +119,7 @@ if test "$enable_libs" = "no"; then have_libs="no" with_cpp="no" with_c_glib="no" + with_cl="no" with_java="no" with_csharp="no" with_python="no" @@ -235,6 +236,7 @@ if test "$with_java" = "yes"; then AX_CHECK_ANT_VERSION($ANT, 1.7) AC_SUBST(CLASSPATH) AC_SUBST(ANT_FLAGS) + AC_SUBST(GRADLE_OPTS) if test "x$JAVA" != "x" && test "x$JAVAC" != "x" && test "x$ANT" != "x" ; then have_java="yes" fi @@ -353,9 +355,6 @@ if test "$with_php_extension" = "yes"; then fi AM_CONDITIONAL(WITH_PHP_EXTENSION, [test "$have_php_extension" = "yes"]) -AC_PATH_PROG([PHPUNIT], [phpunit]) -AM_CONDITIONAL(HAVE_PHPUNIT, [test "x$PHPUNIT" != "x"]) - AX_THRIFT_LIB(dart, [DART], yes) if test "$with_dart" = "yes"; then AC_PATH_PROG([DART], [dart]) @@ -455,6 +454,16 @@ if test "$with_rs" = "yes"; then fi AM_CONDITIONAL(WITH_RS, [test "$have_rs" = "yes"]) +AX_THRIFT_LIB(cl, [Common Lisp], yes) +have_cl="no" +if test "$with_cl" = "yes"; then + AC_PATH_PROG([SBCL], [sbcl]) + if test "x$SBCL" != "x"; then + have_cl="yes" + fi +fi +AM_CONDITIONAL(WITH_CL, [test "$have_cl" = "yes"]) + AX_THRIFT_LIB(haxe, [Haxe], yes) if test "$with_haxe" = "yes"; then AC_PATH_PROG([HAXE], [haxe]) @@ -515,7 +524,7 @@ linker flags for OPTLINK. Please set DMD_LIBEVENT_FLAGS manually.]) else AX_LIB_EVENT([2.0]) if test "$success" = "yes"; then - DMD_LIBEVENT_FLAGS=$(echo "$LIBEVENT_LDFLAGS $LIBEVENT_LIBS" | \ + DMD_LIBEVENT_FLAGS=$(echo "-fuse-ld=gold $LIBEVENT_LDFLAGS $LIBEVENT_LIBS" | \ sed -e 's/^ *//g;s/ *$//g;s/^\(.\)/-L\1/g;s/ */ -L/g') with_d_event_tests="yes" else @@ -539,7 +548,7 @@ linker flags for OPTLINK. 
Please set DMD_OPENSSL_FLAGS manually.]) else AX_CHECK_OPENSSL([with_d_ssl_tests="yes"]) if test "$with_d_ssl_tests" = "yes"; then - DMD_OPENSSL_FLAGS=$(echo "$OPENSSL_LDFLAGS $OPENSSL_LIBS" | \ + DMD_OPENSSL_FLAGS=$(echo "-fuse-ld=gold $OPENSSL_LDFLAGS $OPENSSL_LIBS" | \ sed -e 's/^ *//g;s/ *$//g;s/^\(.\)/-L\1/g;s/ */ -L/g') else AC_MSG_WARN([D OpenSSL interface present, but OpenSSL library not found.]) @@ -624,8 +633,10 @@ AC_CHECK_HEADERS([limits.h]) AC_CHECK_HEADERS([netdb.h]) AC_CHECK_HEADERS([netinet/in.h]) AC_CHECK_HEADERS([pthread.h]) +AC_CHECK_HEADERS([signal.h]) AC_CHECK_HEADERS([stddef.h]) AC_CHECK_HEADERS([stdlib.h]) +AC_CHECK_HEADERS([sys/ioctl.h]) AC_CHECK_HEADERS([sys/socket.h]) AC_CHECK_HEADERS([sys/time.h]) AC_CHECK_HEADERS([sys/un.h]) @@ -786,6 +797,7 @@ AC_CONFIG_FILES([ compiler/cpp/test/Makefile compiler/cpp/src/thrift/version.h lib/Makefile + lib/cl/Makefile lib/cpp/Makefile lib/cpp/test/Makefile lib/cpp/thrift-nb.pc @@ -827,6 +839,7 @@ AC_CONFIG_FILES([ test/Makefile test/features/Makefile test/c_glib/Makefile + test/cl/Makefile test/cpp/Makefile test/csharp/Makefile test/erl/Makefile @@ -845,6 +858,7 @@ AC_CONFIG_FILES([ test/rs/Makefile tutorial/Makefile tutorial/c_glib/Makefile + tutorial/cl/Makefile tutorial/cpp/Makefile tutorial/d/Makefile tutorial/go/Makefile @@ -898,6 +912,8 @@ if test "$have_rs" = "yes" ; then MAYBE_RS="rs" ; else MAYBE_RS="" ; fi AC_SUBST([MAYBE_RS]) if test "$have_dotnetcore" = "yes" ; then MAYBE_DOTNETCORE="netcore" ; else MAYBE_DOTNETCORE="" ; fi AC_SUBST([MAYBE_DOTNETCORE]) +if test "$have_cl" = "yes" ; then MAYBE_CL="cl" ; else MAYBE_CL="" ; fi +AC_SUBST([MAYBE_CL]) AC_OUTPUT @@ -908,6 +924,7 @@ echo echo "Building C (GLib) Library .... : $have_c_glib" echo "Building C# (Mono) Library ... : $have_csharp" echo "Building C++ Library ......... : $have_cpp" +echo "Building Common Lisp Library.. : $have_cl" echo "Building D Library ........... : $have_d" echo "Building Dart Library ........ 
: $have_dart" echo "Building dotnetcore Library .. : $have_dotnetcore" @@ -942,6 +959,12 @@ if test "$have_cpp" = "yes" ; then echo " Build TQTcpServer (Qt5) ... : $have_qt5" echo " C++ compiler version ...... : $($CXX --version | head -1)" fi +if test "$have_cl" = "yes" ; then + echo + echo "Common Lisp Library:" + echo " Using Common Lisp ......... : $SBCL" + echo " Using Common Lisp version . : $($SBCL --version)" +fi if test "$have_d" = "yes" ; then echo echo "D Library:" @@ -992,10 +1015,10 @@ fi if test "$have_java" = "yes" ; then echo echo "Java Library:" - echo " Using ant ................. : $ANT" + echo " Using gradlew ............. : lib/java/gradlew" echo " Using java ................ : $JAVA" echo " Using javac ............... : $JAVAC" - echo " Using ant version ......... : $($ANT -version 2>&1)" + echo " Using Gradle version ...... : $(lib/java/gradlew --version --quiet | grep Gradle 2>&1)" echo " Using java version ........ : $($JAVA -version 2>&1 | grep 'version ')" fi if test "$have_lua" = "yes" ; then diff --git a/vendor/git.apache.org/thrift.git/contrib/Rebus/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/contrib/Rebus/Properties/AssemblyInfo.cs index 9c4d7ccb9..e476eab76 100644 --- a/vendor/git.apache.org/thrift.git/contrib/Rebus/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/contrib/Rebus/Properties/AssemblyInfo.cs @@ -34,5 +34,5 @@ using System.Runtime.InteropServices; [assembly: Guid("0af10984-40d3-453d-b1e5-421529e8c7e2")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/contrib/fb303/cpp/FacebookBase.h b/vendor/git.apache.org/thrift.git/contrib/fb303/cpp/FacebookBase.h index 2159c95fd..daa524644 100644 --- a/vendor/git.apache.org/thrift.git/contrib/fb303/cpp/FacebookBase.h +++ 
b/vendor/git.apache.org/thrift.git/contrib/fb303/cpp/FacebookBase.h @@ -22,6 +22,7 @@ #include "FacebookService.h" +#include #include #include diff --git a/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.properties b/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.properties new file mode 100644 index 000000000..84636683c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.properties @@ -0,0 +1,5 @@ +# Maven Ant tasks Jar details +mvn.ant.task.version=2.1.3 +mvn.repo=http://repo1.maven.org/maven2 +mvn.ant.task.url=${mvn.repo}/org/apache/maven/maven-ant-tasks/${mvn.ant.task.version} +mvn.ant.task.jar=maven-ant-tasks-${mvn.ant.task.version}.jar diff --git a/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.xml b/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.xml index 8f2fa51c7..591a4cbd0 100755 --- a/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.xml +++ b/vendor/git.apache.org/thrift.git/contrib/fb303/java/build.xml @@ -7,9 +7,9 @@ to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - + http://www.apache.org/licenses/LICENSE-2.0 - + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -26,20 +26,20 @@ - + + - + - + - @@ -74,8 +74,12 @@ - - + + + + + + @@ -120,19 +124,19 @@ - - diff --git a/vendor/git.apache.org/thrift.git/contrib/fb303/py/setup.py b/vendor/git.apache.org/thrift.git/contrib/fb303/py/setup.py index 9dafa88cf..4321ce258 100644 --- a/vendor/git.apache.org/thrift.git/contrib/fb303/py/setup.py +++ b/vendor/git.apache.org/thrift.git/contrib/fb303/py/setup.py @@ -26,7 +26,7 @@ except: from distutils.core import setup, Extension, Command setup(name='thrift_fb303', - version='0.11.0', + version='1.0.0-dev', description='Python bindings for the Apache Thrift FB303', author=['Thrift Developers'], author_email=['dev@thrift.apache.org'], diff --git a/vendor/git.apache.org/thrift.git/contrib/thrift-maven-plugin/pom.xml b/vendor/git.apache.org/thrift.git/contrib/thrift-maven-plugin/pom.xml index 76d0d4f3a..0af595764 100644 --- a/vendor/git.apache.org/thrift.git/contrib/thrift-maven-plugin/pom.xml +++ b/vendor/git.apache.org/thrift.git/contrib/thrift-maven-plugin/pom.xml @@ -27,7 +27,7 @@ thrift-maven-plugin maven-plugin thrift-maven-plugin - 0.11.0 + 1.0-SNAPSHOT diff --git a/vendor/git.apache.org/thrift.git/contrib/vagrant/centos-6.5/README.md b/vendor/git.apache.org/thrift.git/contrib/vagrant/centos-6.5/README.md index bd7f022eb..76dca4439 100644 --- a/vendor/git.apache.org/thrift.git/contrib/vagrant/centos-6.5/README.md +++ b/vendor/git.apache.org/thrift.git/contrib/vagrant/centos-6.5/README.md @@ -11,7 +11,7 @@ This will download and launch the base box VM under VirtualBox and run the Apach $ vagrant ssh [vagrant@thrift ~]$ cd /thrift [vagrant@thrift thrift]$ compiler/cpp/thrift --version - Thrift version 0.11.0 + Thrift version 1.0.0-dev The provisioning script (inside the 
Vagrantfile) runs ./bootstrap.sh, ./configure, make and make check, but does not install thrift. To install thrift run "make install". diff --git a/vendor/git.apache.org/thrift.git/contrib/zeromq/csharp/ThriftZMQ.csproj b/vendor/git.apache.org/thrift.git/contrib/zeromq/csharp/ThriftZMQ.csproj index 32a8c498b..6b4653065 100755 --- a/vendor/git.apache.org/thrift.git/contrib/zeromq/csharp/ThriftZMQ.csproj +++ b/vendor/git.apache.org/thrift.git/contrib/zeromq/csharp/ThriftZMQ.csproj @@ -1,22 +1,4 @@  - Debug @@ -43,7 +25,7 @@ false true 0 - 0.11.0.%2a + 1.0.0.%2a false false true diff --git a/vendor/git.apache.org/thrift.git/debian/changelog b/vendor/git.apache.org/thrift.git/debian/changelog index 68629d6e1..f2da2327e 100644 --- a/vendor/git.apache.org/thrift.git/debian/changelog +++ b/vendor/git.apache.org/thrift.git/debian/changelog @@ -1,8 +1,7 @@ -thrift (0.11.0) stable; urgency=low +thrift (1.0.0-dev) stable; urgency=low * update version * fix libthrift0.install - * update to 0.11.0 -- Roger Meier Tue, 08 Jan 2013 22:40:12 +0100 thrift (0.9.0) stable; urgency=low diff --git a/vendor/git.apache.org/thrift.git/debian/control b/vendor/git.apache.org/thrift.git/debian/control index 218d21722..cb8a3764f 100644 --- a/vendor/git.apache.org/thrift.git/debian/control +++ b/vendor/git.apache.org/thrift.git/debian/control @@ -3,12 +3,12 @@ Section: devel Priority: extra Build-Depends: debhelper (>= 9), build-essential, mono-mcs, python-dev, ant, mono-devel, libmono-system-web4.0-cil, erlang-base, ruby-dev | ruby1.9.1-dev, ruby-bundler ,autoconf, automake, - pkg-config, libtool, bison, flex, libboost-dev | libboost1.53-dev, + pkg-config, libtool, bison, flex, libboost-dev | libboost1.53-dev | libboost1.63-all-dev, python-all, python-setuptools, python-all-dev, python-all-dbg, python3-all, python3-setuptools, python3-all-dev, python3-all-dbg, openjdk-7-jdk | openjdk-8-jdk | default-jdk, - libboost-test-dev | libboost-test1.53-dev, libevent-dev, libssl-dev, perl (>= 5.8.0-7), 
- php5 | php7.0, php5-dev | php7.0-dev, libglib2.0-dev, qtchooser, qtbase5-dev-tools + libboost-test-dev | libboost-test1.53-dev | libboost-test1.63-dev, libevent-dev, libssl-dev, perl (>= 5.8.0-7), + php (>= 5), php-dev (>= 5), libglib2.0-dev, qtchooser, qtbase5-dev-tools Maintainer: Thrift Developer's Homepage: http://thrift.apache.org/ Vcs-Git: https://git-wip-us.apache.org/repos/asf/thrift.git diff --git a/vendor/git.apache.org/thrift.git/debian/rules b/vendor/git.apache.org/thrift.git/debian/rules index f370325f4..9b436d97a 100755 --- a/vendor/git.apache.org/thrift.git/debian/rules +++ b/vendor/git.apache.org/thrift.git/debian/rules @@ -73,7 +73,7 @@ build-indep-stamp: configure-stamp $(CURDIR)/compiler/cpp/thrift # Java cd $(CURDIR)/lib/java && \ - ant + ./gradlew --no-daemon -Prelease=true jar # C# $(MAKE) -C $(CURDIR)/lib/csharp @@ -113,7 +113,7 @@ install-indep: # Java mkdir -p $(CURDIR)/debian/libthrift-java/usr/share/java/ && \ - cp $(CURDIR)/lib/java/build/libthrift*.jar \ + cp $(CURDIR)/lib/java/build/libs/libthrift*.jar \ $(CURDIR)/debian/libthrift-java/usr/share/java/ # Ruby diff --git a/vendor/git.apache.org/thrift.git/doap.rdf b/vendor/git.apache.org/thrift.git/doap.rdf index 06447c744..75274aa7b 100755 --- a/vendor/git.apache.org/thrift.git/doap.rdf +++ b/vendor/git.apache.org/thrift.git/doap.rdf @@ -58,11 +58,6 @@ - - Apache Thrift - 2017-11-30 - 0.11.0 - Apache Thrift 2017-03-01 diff --git a/vendor/git.apache.org/thrift.git/doc/install/debian.md b/vendor/git.apache.org/thrift.git/doc/install/debian.md index 5e1ae4c49..83090ab33 100644 --- a/vendor/git.apache.org/thrift.git/doc/install/debian.md +++ b/vendor/git.apache.org/thrift.git/doc/install/debian.md @@ -1,7 +1,7 @@ ## Debian/Ubuntu install The following command will install tools and libraries required to build and install the Apache Thrift compiler and C++ libraries on a Debian/Ubuntu Linux based system. 
- sudo apt-get install automake bison flex g++ git libboost1.55-all-dev libevent-dev libssl-dev libtool make pkg-config + sudo apt-get install automake bison flex g++ git libboost-all-dev libevent-dev libssl-dev libtool make pkg-config Debian 7/Ubuntu 12 users need to manually install a more recent version of automake and (for C++ library and test support) boost: @@ -18,9 +18,10 @@ Debian 7/Ubuntu 12 users need to manually install a more recent version of autom If you would like to build Apache Thrift libraries for other programming languages you may need to install additional packages. The following languages require the specified additional packages: * Java - * To build Apache Thrift support for Java you will need to install the ant package and Java JDK v1.7 or higher. Type **javac** to see a list of available packages, pick the one you prefer and **apt-get install** it (e.g. openjdk-7-jdk). + * packages: ant + * You will also need Java JDK v1.7 or higher. Type **javac** to see a list of available packages, pick the one you prefer and **apt-get install** it (e.g. default-jdk). 
* Ruby - * ruby-full ruby-dev ruby-rspec rake rubygems libdaemons-ruby libgemplugin-ruby mongrel + * ruby-full ruby-dev ruby-rspec rake rubygems bundler * Python * python-all python-all-dev python-all-dbg * Perl @@ -30,14 +31,29 @@ If you would like to build Apache Thrift libraries for other programming languag * C_glib * libglib2.0-dev * Erlang - * erlang-base erlang-eunit erlang-dev + * erlang-base erlang-eunit erlang-dev rebar * Csharp * mono-gmcs mono-devel libmono-system-web2.0-cil nunit nunit-console * Haskell - * ghc6 cabal-install libghc6-binary-dev libghc6-network-dev libghc6-http-dev + * ghc cabal-install libghc-binary-dev libghc-network-dev libghc-http-dev * Thrift Compiler for Windows - * mingw32 mingw32-binutils mingw32-runtime nsis - + * mingw-w64 mingw-w64-x86-64-dev nsis + * Rust + * rustc cargo + * Haxe + * haxe + * Lua + * lua5.3 liblua5.3-dev + * NodeJs + * nodejs npm + * dotnetcore + * https://www.microsoft.com/net/learn/get-started/linuxubuntu + * d-lang + * curl -fsS https://dlang.org/install.sh | bash -s dmd + * dart & pub + * https://www.dartlang.org/install/linux + * https://www.dartlang.org/tools/pub/installing + ## Additional reading diff --git a/vendor/git.apache.org/thrift.git/lib/Makefile.am b/vendor/git.apache.org/thrift.git/lib/Makefile.am index 636f42cfc..0401c99e1 100644 --- a/vendor/git.apache.org/thrift.git/lib/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/Makefile.am @@ -97,6 +97,10 @@ if WITH_RS SUBDIRS += rs endif +if WITH_CL +SUBDIRS += cl +endif + # All of the libs that don't use Automake need to go in here # so they will end up in our release tarballs. 
EXTRA_DIST = \ diff --git a/vendor/git.apache.org/thrift.git/lib/as3/build.properties b/vendor/git.apache.org/thrift.git/lib/as3/build.properties new file mode 100644 index 000000000..84636683c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/as3/build.properties @@ -0,0 +1,5 @@ +# Maven Ant tasks Jar details +mvn.ant.task.version=2.1.3 +mvn.repo=http://repo1.maven.org/maven2 +mvn.ant.task.url=${mvn.repo}/org/apache/maven/maven-ant-tasks/${mvn.ant.task.version} +mvn.ant.task.jar=maven-ant-tasks-${mvn.ant.task.version}.jar diff --git a/vendor/git.apache.org/thrift.git/lib/as3/build.xml b/vendor/git.apache.org/thrift.git/lib/as3/build.xml index 604da42e5..2b374ddde 100755 --- a/vendor/git.apache.org/thrift.git/lib/as3/build.xml +++ b/vendor/git.apache.org/thrift.git/lib/as3/build.xml @@ -7,9 +7,9 @@ to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - + http://www.apache.org/licenses/LICENSE-2.0 - + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -19,23 +19,24 @@ --> - + - + - + + - + - + @@ -52,12 +53,13 @@ + - + @@ -69,10 +71,10 @@ - @@ -108,19 +110,19 @@ - - @@ -176,5 +178,5 @@ - + diff --git a/vendor/git.apache.org/thrift.git/lib/cl/Makefile.am b/vendor/git.apache.org/thrift.git/lib/cl/Makefile.am new file mode 100644 index 000000000..34b38861d --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cl/Makefile.am @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +THRIFT = $(top_builddir)/compiler/cpp/thrift + +all-local: + bash ensure-externals.sh + +run-tests: test/make-test-binary.lisp + $(SBCL) --script test/make-test-binary.lisp + +check-local: run-tests + ./run-tests + +clean-local: + $(RM) run-tests quicklisp.lisp backport-update.zip + $(RM) -rf lib externals quicklisp + +EXTRA_DIST = \ + README.md \ + READMES \ + load-locally.lisp \ + test \ + ensure-externals.sh diff --git a/vendor/git.apache.org/thrift.git/lib/cl/README.md b/vendor/git.apache.org/thrift.git/lib/cl/README.md new file mode 100644 index 000000000..1d6eafbd7 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cl/README.md @@ -0,0 +1,253 @@ +Thrift Common Lisp Library + +License +======= + +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. 
+ + + +Using Thrift with Common Lisp +============================ + + Thrift is a protocol and library for language-independent communication between cooperating + processes. The communication takes the form of request and response messages, of which the forms + are specified in advance throufh a shared interface definition. A Thrift definition file is translated + into Lisp source files, which comprise several definitions: + + * Three packages, one for the namespace of the implementation operators, and one each for request and + response operators. + * Various type definitions as implementations for Thrift typedef and enum definitions. + * DEF-STRUCT and DEF-EXCEPTION forms for Thrift struct and exception definitions. + * DEF-SERVICE forms for thrift service definitions. + + Each service definition expands in a collection of generic function definitions. For each `op` + in the service definition, two functions are defined + + * `op`-request is defined for use by a client. It accepts an additional initial `protocol` argument, + to act as the client proxy for the operation and mediate the interaction with a remote process + through a Thrift-encoded transport stream. + * `op`-response is defined for use by a server. It accepts a single `protocol` argument. A server + uses it to decode the request message, invoke the base `op` function with the message arguments, + encode and send the the result as a response, and handles exceptions. + + The client interface is one operator + + * `with-client (variable location) . body` : creates a connection in a dynamic context and closes it + upon exit. The variable is bound to a client proxy stream/protocol instance, which wraps the + base i/o stream - socket, file, etc, with an operators which implement the Thrift protocol + and transport mechanisms. 
+ + The server interface combines server and service objects + + * `serve (location service)` : accepts connections on the designated port and responds to + requests of the service's operations. + + +Building +-------- + +The Thrift Common Lisp library is packaged as the ASDF[[1]] system `thrift`. +It depends on the systems + +* puri[[2]] : for the thrift uri class +* closer-mop[[3]] : for class metadata +* trivial-utf-8[[4]] : for string codecs +* usocket[[5]] : for the socket transport +* ieee-floats[[6]] : for conversion between ints and floats +* trivial-gray-streams[[7]] : an abstraction layer for gray streams +* alexandria[[8]] : handy utilities + +The dependencies are bundled for local builds of tests and tutorial binaries - +it is possible to use those bundles to load the library, too. + +In order to build it, register those systems with ASDF and evaluate: + + (asdf:load-system :thrift) + +This will compile and load the Lisp compiler for Thrift definition files, the +transport and protocol implementations, and the client and server interface +functions. In order to use Thrift in an application, one must also author and/or +load the interface definitions for the remote service.[[9]] If one is implementing a service, +one must also define the actual functions to which Thrift is to act as the proxy +interface. The remainder of this document follows the Thrift tutorial to illustrate how +to perform the steps + + * implement the service + * translate the Thrift IDL + * load the Lisp service interfaces + * run a server for the service + * use a client to access the service remotely + +Note that, if one is to implement a new service, one will also need to author the +IDL files, as there is no facility to generate them from a service implementation. + + +Implement the Service +--------------------- + +The tutorial comprises serveral functions: `add`, `ping`, `zip`, and `calculate`. +Each translated IDL file generates three packages for every service. 
In the case of +the tutorial file, the relevant packages are: + + * tutorial.calculator + * tutorial.calculator-implementation + * tutorial.calculator-response + +This is to separate the request (generated), response (generated) and implementation +(meant to be implemented by the programmer) functions for defined Thrift methods. + +It is suggested to work in the `tutorial-implementation` package while implementing +the services - it imports the `common-lisp` package, while the service-specific ones +don't (to avoid conflicts between Thrift method names and function names in `common-lisp`). + + ;; define the base operations + + (in-package :tutorial-implementation) + + (defun tutorial.calculator-implementation:add (num1 num2) + (format t "~&Asked to add ~A and ~A." num1 num2) + (+ num1 num2)) + + (defun tutorial.calculator-implementation:ping () + (print :ping)) + + (defun tutorial.calculator-implementation:zip () + (print :zip)) + + (defun tutorial.calculator-implementation:calculate (logid task) + (calculate-op (work-op task) (work-num1 task) (work-num2 task))) + + (defgeneric calculate-op (op arg1 arg2) + (:method :around (op arg1 arg2) + (let ((result (call-next-method))) + (format t "~&Asked to calculate: ~d on ~A and ~A = ~d." op arg1 arg2 result) + result)) + + (:method ((op (eql operation.add)) arg1 arg2) + (+ arg1 arg2)) + (:method ((op (eql operation.subtract)) arg1 arg2) + (- arg1 arg2)) + (:method ((op (eql operation.multiply)) arg1 arg2) + (* arg1 arg2)) + (:method ((op (eql operation.divide)) arg1 arg2) + (/ arg1 arg2))) + + (defun zip () (print 'zip)) + + +Translate the Thrift IDL +------------------------ + +IDL files employ the file extension `thrift`. 
In this case, there are two files to translate + * `tutorial.thrift` + * `shared.thrift` +As the former includes the latter, one uses it to generate the interfaces: + + $THRIFT/bin/thrift -r --gen cl $THRIFT/tutorial/tutorial.thrift + +`-r` stands for recursion, while `--gen` lets one choose the language to translate to. + + +Load the Lisp translated service interfaces +------------------------------------------- + +The translator generates three files for each IDL file. For example `tutorial-types.lisp`, +`tutorial-vars.lisp` and an `.asd` file that can be used to load them both and pull in +other includes (like `shared` within the tutorial) as dependencies. + + +Run a Server for the Service +---------------------------- + +The actual service name, as specified in the `def-service` form in `tutorial.lisp`, is `calculator`. +Each service definition defines a global variable with the service name and binds it to a +service instance whch describes the operations. + +In order to start a service, specify a location and the service instance. 
+ + (in-package :tutorial) + (serve #u"thrift://127.0.0.1:9091" calculator) + + +Use a Client to Access the Service Remotely +------------------------------------------- + + +[in some other process] run the client + + (in-package :cl-user) + + (macrolet ((show (form) + `(format *trace-output* "~%~s =>~{ ~s~}" + ',form + (multiple-value-list (ignore-errors ,form))))) + (with-client (protocol #u"thrift://127.0.0.1:9091") + (show (tutorial.calculator:ping protocol)) + (show (tutorial.calculator:add protocol 1 2)) + (show (tutorial.calculator:add protocol 1 4)) + + (let ((task (make-instance 'tutorial:work + :op operation.subtract :num1 15 :num2 10))) + (show (tutorial.calculator:calculate protocol 1 task)) + + (setf (tutorial:work-op task) operation.divide + (tutorial:work-num1 task) 1 + (tutorial:work-num2 task) 0) + (show (tutorial.calculator:calculate protocol 1 task))) + + (show (shared.shared-service:get-struct protocol 1)) + + (show (zip protocol)))) + +Issues +------ + +### optional fields + Where the IDL declares a field options, the def-struct form includes no + initform for the slot and the encoding operator skips an unbound slot. This leave some ambiguity + with bool fields. + +### instantiation protocol : + struct classes are standard classes and exception classes are + whatever the implementation prescribes. decoders apply make-struct to an initargs list. + particularly at the service end, there are advantages to resourcing structs and decoding + with direct side-effects on slot-values + +### maps: + Maps are now represented as hash tables. As data through the call/reply interface is all statically + typed, it is not necessary for the objects to themselves indicate the coding form. Association lists + would be sufficient. As the key type is arbitrary, property lists offer no additional convenience: + as `getf` operates with `eq` a new access interface would be necessary and they would not be + available for function application. 
+ + + [1]: www.common-lisp.net/asdf + [2]: http://github.com/lisp/com.b9.puri.ppcre + [3]: www.common-lisp.net/closer-mop + [4]: trivial-utf-8 + [5]: https://github.com/usocket/usocket + [6]: https://github.com/marijnh/ieee-floats + [7]: https://github.com/trivial-gray-streams/trivial-gray-streams + [8]: https://gitlab.common-lisp.net/alexandria/alexandria + [9]: http://wiki.apache.org/thrift/ThriftGeneration + +* usocket[[5]] : for the socket transport +* ieee-floats[[6]] : for conversion between ints and floats +* trivial-gray-streams[[7]] : an abstraction layer for gray streams +* alexandria[[8]] : handy utilities diff --git a/vendor/git.apache.org/thrift.git/lib/cl/READMES/readme-cassandra.lisp b/vendor/git.apache.org/thrift.git/lib/cl/READMES/readme-cassandra.lisp new file mode 100644 index 000000000..72744ea99 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cl/READMES/readme-cassandra.lisp @@ -0,0 +1,64 @@ +(in-package :cl-user) + +#+(or ccl sbcl) /development/source/library/ +(load "build-init.lisp") + +;;; ! first, select the api version in the cassandra system definition +;;; as only one should be loaded at a time. 
+(asdf:load-system :de.setf.cassandra) + +(in-package :de.setf.cassandra) + +(defparameter *c-location* + ;; remote + ;; #u"thrift://ec2-174-129-66-148.compute-1.amazonaws.com:9160" + ;; local + #u"thrift://127.0.0.1:9160" + "A cassandra service location - either the local one or a remote service + - always a 'thrift' uri.") + +(defparameter *c* (thrift:client *c-location*)) + + +(cassandra:describe-keyspaces *c*) +;; => ("Keyspace1" "system") + +(cassandra:describe-cluster-name *c*) +;; =>"Test Cluster" + +(cassandra:describe-version *c*) +;; => "2.1.0" + +(loop for space in (cassandra:describe-keyspaces *c*) + collect (loop for key being each hash-key of (cassandra:describe-keyspace *c* space) + using (hash-value value) + collect (cons key + (loop for key being each hash-key of value + using (hash-value value) + collect (cons key value))))) + + +(close *c*) + +(defun describe-cassandra (location &optional (stream *standard-output*)) + "Print the first-order store metadata for a cassandra LOCATION." 
+ + (thrift:with-client (cassandra location) + (let* ((keyspace-names (cassandra:describe-keyspaces cassandra)) + (cluster (cassandra:describe-cluster-name cassandra)) + (version (cassandra:describe-version cassandra)) + (keyspace-descriptions (loop for space in keyspace-names + collect (cons space + (loop for key being each hash-key + of (cassandra:describe-keyspace cassandra space) + using (hash-value value) + collect (cons key + (loop for key being each hash-key of value + using (hash-value value) + collect (cons key value)))))))) + (format stream "~&connection to : ~a" cassandra) + (format stream "~&version : ~a" version) + (format stream "~&cluster : ~a" cluster) + (format stream "~&keyspaces~{~{~%~%space: ~a~@{~% ~{~a :~@{~20t~:w~^~%~}~}~}~}~}" keyspace-descriptions)))) + +;;; (describe-cassandra *c-location*) diff --git a/vendor/git.apache.org/thrift.git/lib/cl/ensure-externals.sh b/vendor/git.apache.org/thrift.git/lib/cl/ensure-externals.sh new file mode 100755 index 000000000..0495f030c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cl/ensure-externals.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +if [[ ! -e quicklisp.lisp ]]; then curl -O https://beta.quicklisp.org/quicklisp.lisp; fi +sbcl --load quicklisp.lisp \ + --eval "(ignore-errors (quicklisp-quickstart:install :path \"quicklisp/\"))" \ + --eval "(load \"quicklisp/setup.lisp\")" \ + --eval "(quicklisp:bundle-systems '(#:puri #:usocket #:closer-mop #:trivial-utf-8 #:ieee-floats #:trivial-gray-streams #:alexandria #:bordeaux-threads #:cl-ppcre #:fiasco #:net.didierverna.clon) :to \"externals/\")" \ + --eval "(quit)" \ + --no-userinit +if [[ ! 
-e backport-update.zip ]]; then + curl -O -L https://github.com/TurtleWarePL/de.setf.thrift/archive/backport-update.zip; +fi +mkdir -p lib +unzip -u backport-update.zip -d lib diff --git a/vendor/git.apache.org/thrift.git/lib/cl/load-locally.lisp b/vendor/git.apache.org/thrift.git/lib/cl/load-locally.lisp new file mode 100644 index 000000000..d12c70476 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cl/load-locally.lisp @@ -0,0 +1,23 @@ +(in-package #:cl-user) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. + +;;;; Just a script for loading the library itself, using bundled dependencies. +;;;; This is here for when we want to build the self-test and cross-test +;;;; binaries. + +(require "asdf") + +(load (merge-pathnames "externals/bundle.lisp" *load-truename*)) +(asdf:load-asd (merge-pathnames "lib/de.setf.thrift-backport-update/thrift.asd" *load-truename*)) +(asdf:load-system :thrift) diff --git a/vendor/git.apache.org/thrift.git/lib/cl/test/make-test-binary.lisp b/vendor/git.apache.org/thrift.git/lib/cl/test/make-test-binary.lisp new file mode 100644 index 000000000..4e7a58cc4 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cl/test/make-test-binary.lisp @@ -0,0 +1,31 @@ +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. 
+;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. + +;;;; This file is used to build the binary that runs all self-tests. The +;;;; binary is then meant to be hooked up to Thrift's `make check` facility, +;;;; but can easily be run on its own as well. + +(in-package #:cl-user) + +(require "asdf") +(load (merge-pathnames "../load-locally.lisp" *load-truename*)) +(asdf:load-asd (merge-pathnames "../lib/de.setf.thrift-backport-update/test/thrift-test.asd" *load-truename*)) +(asdf:load-system :thrift-test) +(asdf:load-system :net.didierverna.clon) + +(net.didierverna.clon:nickname-package) + +(defun main () + (let ((result (if (fiasco:run-tests 'thrift-test) 0 -1))) + (clon:exit result))) + +(clon:dump "run-tests" main) diff --git a/vendor/git.apache.org/thrift.git/lib/cocoa/src/Thrift.h b/vendor/git.apache.org/thrift.git/lib/cocoa/src/Thrift.h index 89486b2b8..6fc594a4b 100644 --- a/vendor/git.apache.org/thrift.git/lib/cocoa/src/Thrift.h +++ b/vendor/git.apache.org/thrift.git/lib/cocoa/src/Thrift.h @@ -17,4 +17,4 @@ * under the License. 
*/ -#define ThriftVersion @"0.11.0" +#define ThriftVersion @"1.0.0-dev" diff --git a/vendor/git.apache.org/thrift.git/lib/cocoa/src/protocol/TBase.h b/vendor/git.apache.org/thrift.git/lib/cocoa/src/protocol/TBase.h index b31061ec0..9935d5068 100644 --- a/vendor/git.apache.org/thrift.git/lib/cocoa/src/protocol/TBase.h +++ b/vendor/git.apache.org/thrift.git/lib/cocoa/src/protocol/TBase.h @@ -26,14 +26,14 @@ /** * De-serialize object from the given input protocol * - * @param input protocol used for reading + * @param inProtocol protocol used for reading */ -(BOOL) read:(id )inProtocol error:(NSError **)error; /** * Serialize object to the given protocol * - * @param buf output protocol used for writing + * @param outProtocol output protocol used for writing */ -(BOOL) write:(id )outProtocol error:(NSError **)error; diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/Makefile.am b/vendor/git.apache.org/thrift.git/lib/cpp/Makefile.am index cd1d92346..ff3be5d3c 100755 --- a/vendor/git.apache.org/thrift.git/lib/cpp/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/cpp/Makefile.am @@ -150,7 +150,7 @@ libthriftz_la_CXXFLAGS = $(AM_CXXFLAGS) libthriftqt_la_CXXFLAGS = $(AM_CXXFLAGS) libthriftqt5_la_CXXFLAGS = $(AM_CXXFLAGS) libthriftnb_la_LDFLAGS = -release $(VERSION) $(BOOST_LDFLAGS) -libthriftz_la_LDFLAGS = -release $(VERSION) $(BOOST_LDFLAGS) +libthriftz_la_LDFLAGS = -release $(VERSION) $(BOOST_LDFLAGS) $(ZLIB_LIBS) libthriftqt_la_LDFLAGS = -release $(VERSION) $(BOOST_LDFLAGS) $(QT_LIBS) libthriftqt5_la_LDFLAGS = -release $(VERSION) $(BOOST_LDFLAGS) $(QT5_LIBS) diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/TOutput.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/TOutput.cpp index bb46263b8..ae3a9e282 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/TOutput.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/TOutput.cpp @@ -94,7 +94,7 @@ void TOutput::errorTimeWrapper(const char* msg) { } void 
TOutput::perror(const char* message, int errno_copy) { - std::string out = message + strerror_s(errno_copy); + std::string out = message + std::string(": ") + strerror_s(errno_copy); f_(out.c_str()); } diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.cpp index 74acfaa84..6af8104b6 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.cpp @@ -38,13 +38,14 @@ TEvhttpClientChannel::TEvhttpClientChannel(const std::string& host, const std::string& path, const char* address, int port, - struct event_base* eb) + struct event_base* eb, + struct evdns_base* dnsbase) + : host_(host), path_(path), conn_(NULL) { - conn_ = evhttp_connection_new(address, port); + conn_ = evhttp_connection_base_new(eb, dnsbase, address, port); if (conn_ == NULL) { throw TException("evhttp_connection_new failed"); } - evhttp_connection_set_base(conn_, eb); } TEvhttpClientChannel::~TEvhttpClientChannel() { diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.h b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.h index 977495276..3515ca22f 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.h +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/async/TEvhttpClientChannel.h @@ -27,6 +27,7 @@ #include struct event_base; +struct evdns_base; struct evhttp_connection; struct evhttp_request; @@ -50,7 +51,8 @@ public: const std::string& path, const char* address, int port, - struct event_base* eb); + struct event_base* eb, + struct evdns_base *dnsbase = 0); ~TEvhttpClientChannel(); virtual void sendAndRecvMessage(const VoidCallback& cob, diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/server/TNonblockingServer.cpp 
b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/server/TNonblockingServer.cpp index d17f77c19..f89b5f793 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/server/TNonblockingServer.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/server/TNonblockingServer.cpp @@ -472,6 +472,18 @@ void TNonblockingServer::TConnection::workSocket() { } // size known; now get the rest of the frame transition(); + + // If the socket has more data than the frame header, continue to work on it. This is not strictly necessary for + // regular sockets, because if there is more data, libevent will fire the event handler registered for read + // readiness, which will in turn call workSocket(). However, some socket types (such as TSSLSocket) may have the + // data sitting in their internal buffers and from libevent's perspective, there is no further data available. In + // that case, not having this workSocket() call here would result in a hang as we will never get to work the socket, + // despite having more data. + if (tSocket_->hasPendingDataToRead()) + { + workSocket(); + } + return; case SOCKET_RECV: @@ -647,6 +659,7 @@ void TNonblockingServer::TConnection::transition() { return; } } + // fallthrough // Intentionally fall through here, the call to process has written into // the writeBuffer_ @@ -677,9 +690,6 @@ void TNonblockingServer::TConnection::transition() { appState_ = APP_SEND_RESULT; setWrite(); - // Try to work the socket immediately - // workSocket(); - return; } @@ -698,6 +708,7 @@ void TNonblockingServer::TConnection::transition() { server_->getIdleWriteBufferLimit()); callsForResize_ = 0; } + // fallthrough // N.B.: We also intentionally fall through here into the INIT state! 
@@ -718,9 +729,6 @@ void TNonblockingServer::TConnection::transition() { // Register read event setRead(); - // Try to work the socket right away - // workSocket(); - return; case APP_READ_FRAME_SIZE: @@ -753,9 +761,6 @@ void TNonblockingServer::TConnection::transition() { socketState_ = SOCKET_RECV; appState_ = APP_READ_REQUEST; - // Work the socket right away - workSocket(); - return; case APP_CLOSE_CONNECTION: @@ -1063,7 +1068,7 @@ void TNonblockingServer::expireClose(stdcxx::shared_ptr task) { connection->forceClose(); } -void TNonblockingServer::stop() { +void TNonblockingServer::stop() { // Breaks the event loop in all threads so that they end ASAP. for (uint32_t i = 0; i < ioThreads_.size(); ++i) { ioThreads_[i]->stop(); diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/PlatformSocket.h b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/PlatformSocket.h index 1890b607d..959105806 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/PlatformSocket.h +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/PlatformSocket.h @@ -51,6 +51,8 @@ # define THRIFT_LSEEK _lseek # define THRIFT_WRITE _write # define THRIFT_READ _read +# define THRIFT_IOCTL_SOCKET ioctlsocket +# define THRIFT_IOCTL_SOCKET_NUM_BYTES_TYPE u_long # define THRIFT_FSTAT _fstat # define THRIFT_STAT _stat # ifdef _WIN32_WCE @@ -111,6 +113,8 @@ # define THRIFT_LSEEK lseek # define THRIFT_WRITE write # define THRIFT_READ read +# define THRIFT_IOCTL_SOCKET ioctl +# define THRIFT_IOCTL_SOCKET_NUM_BYTES_TYPE int # define THRIFT_STAT stat # define THRIFT_FSTAT fstat # define THRIFT_GAI_STRERROR gai_strerror diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TBufferTransports.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TBufferTransports.cpp index 1d2692dca..9ac2f844b 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TBufferTransports.cpp +++ 
b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TBufferTransports.cpp @@ -368,7 +368,7 @@ void TMemoryBuffer::ensureCanWrite(uint32_t len) { throw TTransportException(TTransportException::BAD_ARGS, "Internal buffer size overflow"); } - avail = available_write() + (new_size - bufferSize_); + avail = available_write() + (static_cast(new_size) - bufferSize_); } // Allocate into a new pointer so we don't bork ours if it fails. @@ -382,7 +382,7 @@ void TMemoryBuffer::ensureCanWrite(uint32_t len) { wBase_ = new_buffer + (wBase_ - buffer_); wBound_ = new_buffer + new_size; buffer_ = new_buffer; - bufferSize_ = new_size; + bufferSize_ = static_cast(new_size); } void TMemoryBuffer::writeSlow(const uint8_t* buf, uint32_t len) { diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/THttpTransport.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/THttpTransport.cpp index c97f6d3ac..31ae79f12 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/THttpTransport.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/THttpTransport.cpp @@ -84,8 +84,10 @@ uint32_t THttpTransport::readEnd() { uint32_t THttpTransport::readMoreData() { uint32_t size; - // Get more data! - refill(); + if (httpPos_ == httpBufLen_) { + // Get more data! 
+ refill(); + } if (readHeaders_) { readHeaders(); diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.cpp index 3f0e28ed8..7071698ac 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.cpp @@ -249,6 +249,17 @@ TSSLSocket::~TSSLSocket() { close(); } +bool TSSLSocket::hasPendingDataToRead() { + if (!isOpen()) { + return false; + } + initializeHandshake(); + if (!checkHandshake()) + throw TSSLException("TSSLSocket::hasPendingDataToRead: Handshake is not completed"); + // data may be available in SSL buffers (note: SSL_pending does not have a failure mode) + return SSL_pending(ssl_) > 0 || TSocket::hasPendingDataToRead(); +} + void TSSLSocket::init() { handshakeCompleted_ = false; readRetryCount_ = 0; @@ -293,6 +304,7 @@ bool TSSLSocket::peek() { && (errno_copy != THRIFT_EAGAIN)) { break; } + // fallthrough case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: // in the case of SSL_ERROR_SYSCALL we want to wait for an read event again @@ -339,6 +351,7 @@ void TSSLSocket::close() { && (errno_copy != THRIFT_EAGAIN)) { break; } + // fallthrough case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: // in the case of SSL_ERROR_SYSCALL we want to wait for an write/read event again @@ -404,6 +417,8 @@ uint32_t TSSLSocket::read(uint8_t* buf, uint32_t len) { // a certain number break; } + // fallthrough + case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: if (isLibeventSafe()) { @@ -460,6 +475,7 @@ void TSSLSocket::write(const uint8_t* buf, uint32_t len) { && (errno_copy != THRIFT_EAGAIN)) { break; } + // fallthrough case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: if (isLibeventSafe()) { @@ -504,6 +520,7 @@ uint32_t TSSLSocket::write_partial(const uint8_t* buf, uint32_t len) { && (errno_copy != THRIFT_EAGAIN)) { break; } + // fallthrough case 
SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: if (isLibeventSafe()) { @@ -591,6 +608,7 @@ void TSSLSocket::initializeHandshake() { && (errno_copy != THRIFT_EAGAIN)) { break; } + // fallthrough case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: if (isLibeventSafe()) { @@ -623,6 +641,7 @@ void TSSLSocket::initializeHandshake() { && (errno_copy != THRIFT_EAGAIN)) { break; } + // fallthrough case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: if (isLibeventSafe()) { diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.h b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.h index 852720930..ec30cc149 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.h +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSSLSocket.h @@ -78,6 +78,7 @@ public: bool peek(); void open(); void close(); + bool hasPendingDataToRead(); uint32_t read(uint8_t* buf, uint32_t len); void write(const uint8_t* buf, uint32_t len); uint32_t write_partial(const uint8_t* buf, uint32_t len); diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.cpp index d93d0ffd4..18cadbc06 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.cpp @@ -21,6 +21,9 @@ #include #include +#ifdef HAVE_SYS_IOCTL_H +#include +#endif #ifdef HAVE_SYS_SOCKET_H #include #endif @@ -167,6 +170,26 @@ TSocket::~TSocket() { close(); } +bool TSocket::hasPendingDataToRead() { + if (!isOpen()) { + return false; + } + + int32_t retries = 0; + THRIFT_IOCTL_SOCKET_NUM_BYTES_TYPE numBytesAvailable; +try_again: + int r = THRIFT_IOCTL_SOCKET(socket_, FIONREAD, &numBytesAvailable); + if (r == -1) { + int errno_copy = THRIFT_GET_SOCKET_ERROR; + if (errno_copy == THRIFT_EINTR && (retries++ < maxRecvRetries_)) { + goto try_again; + 
} + GlobalOutput.perror("TSocket::hasPendingDataToRead() THRIFT_IOCTL_SOCKET() " + getSocketInfo(), errno_copy); + throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy); + } + return numBytesAvailable > 0; +} + bool TSocket::isOpen() { return (socket_ != THRIFT_INVALID_SOCKET); } @@ -786,11 +809,15 @@ void TSocket::setMaxRecvRetries(int maxRecvRetries) { string TSocket::getSocketInfo() { std::ostringstream oss; - if (host_.empty() || port_ == 0) { - oss << ""; + if (path_.empty()) { + if (host_.empty() || port_ == 0) { + oss << ""; + } else { + oss << ""; + } } else { - oss << ""; + oss << ""; } return oss.str(); } diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.h b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.h index 1f95e68bd..66d9e6cd3 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.h +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/transport/TSocket.h @@ -84,7 +84,9 @@ public: virtual bool isOpen(); /** - * Calls select on the socket to see if there is more data available. + * Checks whether there is more data available in the socket to read. + * + * This call blocks until at least one byte is available or the socket is closed. */ virtual bool peek(); @@ -100,6 +102,17 @@ public: */ virtual void close(); + /** + * Determines whether there is pending data to read or not. + * + * This call does not block. + * \throws TTransportException of types: + * NOT_OPEN means the socket has been closed + * UNKNOWN means something unexpected happened + * \returns true if there is pending data to read, false otherwise + */ + virtual bool hasPendingDataToRead(); + /** * Reads from the underlying socket. 
* \returns the number of bytes read or 0 indicates EOF diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/windows/config.h b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/windows/config.h index a361eacb0..bc4aa42f8 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/windows/config.h +++ b/vendor/git.apache.org/thrift.git/lib/cpp/src/thrift/windows/config.h @@ -70,7 +70,6 @@ #pragma warning(disable : 4996) // Deprecated posix name. #endif -#define VERSION "0.11.0" #define HAVE_GETTIMEOFDAY 1 #define HAVE_SYS_STAT_H 1 diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/CMakeLists.txt b/vendor/git.apache.org/thrift.git/lib/cpp/test/CMakeLists.txt index 5c5ed180a..261382f65 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/test/CMakeLists.txt +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/CMakeLists.txt @@ -1,4 +1,4 @@ -# +# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -19,9 +19,6 @@ include_directories(SYSTEM "${Boost_INCLUDE_DIRS}") -add_definitions("-D__STDC_FORMAT_MACROS") -add_definitions("-D__STDC_LIMIT_MACROS") - if (WITH_DYN_LINK_TEST) add_definitions( -DBOOST_TEST_DYN_LINK ) endif() @@ -43,6 +40,10 @@ set(testgencpp_SOURCES gen-cpp/Recursive_types.h gen-cpp/ThriftTest_types.cpp gen-cpp/ThriftTest_types.h + gen-cpp/OneWayTest_types.cpp + gen-cpp/OneWayTest_types.h + gen-cpp/OneWayService.cpp + gen-cpp/OneWayService.h gen-cpp/TypedefTest_types.cpp gen-cpp/TypedefTest_types.h ThriftTest_extras.cpp @@ -71,6 +72,7 @@ target_link_libraries(Benchmark testgencpp) set(UnitTest_SOURCES UnitTestMain.cpp + OneWayHTTPTest.cpp TMemoryBufferTest.cpp TBufferBaseTest.cpp Base64Test.cpp @@ -394,6 +396,10 @@ add_custom_command(OUTPUT gen-cpp/SecondService.cpp gen-cpp/ThriftTest_constants COMMAND ${THRIFT_COMPILER} --gen cpp ${PROJECT_SOURCE_DIR}/test/ThriftTest.thrift ) +add_custom_command(OUTPUT 
gen-cpp/OneWayService.cpp gen-cpp/OneWayTest_constants.cpp gen-cpp/OneWayTest_types.h gen-cpp/OneWayService.h gen-cpp/OneWayTest_constants.h gen-cpp/OneWayTest_types.cpp + COMMAND ${THRIFT_COMPILER} --gen cpp ${CMAKE_CURRENT_SOURCE_DIR}/OneWayTest.thrift +) + add_custom_command(OUTPUT gen-cpp/ChildService.cpp gen-cpp/ChildService.h gen-cpp/ParentService.cpp gen-cpp/ParentService.h gen-cpp/proc_types.cpp gen-cpp/proc_types.h COMMAND ${THRIFT_COMPILER} --gen cpp:templates,cob_style ${CMAKE_CURRENT_SOURCE_DIR}/processor/proc.thrift ) diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/Makefile.am b/vendor/git.apache.org/thrift.git/lib/cpp/test/Makefile.am index c298e26f8..4b9f77d21 100755 --- a/vendor/git.apache.org/thrift.git/lib/cpp/test/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/Makefile.am @@ -28,6 +28,9 @@ BUILT_SOURCES = gen-cpp/AnnotationTest_types.h \ gen-cpp/ChildService.h \ gen-cpp/EmptyService.h \ gen-cpp/ParentService.h \ + gen-cpp/OneWayTest_types.h \ + gen-cpp/OneWayService.h \ + gen-cpp/OneWayTest_constants.h \ gen-cpp/proc_types.h noinst_LTLIBRARIES = libtestgencpp.la libprocessortest.la @@ -36,6 +39,8 @@ nodist_libtestgencpp_la_SOURCES = \ gen-cpp/AnnotationTest_types.h \ gen-cpp/DebugProtoTest_types.cpp \ gen-cpp/DebugProtoTest_types.h \ + gen-cpp/DoubleConstantsTest_constants.cpp \ + gen-cpp/DoubleConstantsTest_constants.h \ gen-cpp/EnumTest_types.cpp \ gen-cpp/EnumTest_types.h \ gen-cpp/OptionalRequiredTest_types.cpp \ @@ -48,6 +53,12 @@ nodist_libtestgencpp_la_SOURCES = \ gen-cpp/ThriftTest_constants.h \ gen-cpp/TypedefTest_types.cpp \ gen-cpp/TypedefTest_types.h \ + gen-cpp/OneWayService.cpp \ + gen-cpp/OneWayTest_constants.cpp \ + gen-cpp/OneWayTest_types.h \ + gen-cpp/OneWayService.h \ + gen-cpp/OneWayTest_constants.h \ + gen-cpp/OneWayTest_types.cpp \ ThriftTest_extras.cpp \ DebugProtoTest_extras.cpp @@ -93,6 +104,7 @@ check_PROGRAMS = \ link_test \ OpenSSLManualInitTest \ EnumTest \ + RenderedDoubleConstantsTest \ 
AnnotationTest if AMX_HAVE_LIBEVENT @@ -113,6 +125,7 @@ TESTS = \ UnitTests_SOURCES = \ UnitTestMain.cpp \ + OneWayHTTPTest.cpp \ TMemoryBufferTest.cpp \ TBufferBaseTest.cpp \ Base64Test.cpp \ @@ -130,7 +143,9 @@ endif UnitTests_LDADD = \ libtestgencpp.la \ - $(BOOST_TEST_LDADD) + $(BOOST_TEST_LDADD) \ + $(BOOST_SYSTEM_LDADD) \ + $(BOOST_THREAD_LDADD) TInterruptTest_SOURCES = \ TSocketInterruptTest.cpp \ @@ -190,6 +205,10 @@ EnumTest_LDADD = \ libtestgencpp.la \ $(BOOST_TEST_LDADD) +RenderedDoubleConstantsTest_SOURCES = RenderedDoubleConstantsTest.cpp + +RenderedDoubleConstantsTest_LDADD = libtestgencpp.la $(BOOST_TEST_LDADD) + AnnotationTest_SOURCES = \ AnnotationTest.cpp @@ -367,6 +386,10 @@ gen-cpp/AnnotationTest_constants.cpp gen-cpp/AnnotationTest_constants.h gen-cpp/ gen-cpp/DebugProtoTest_types.cpp gen-cpp/DebugProtoTest_types.h gen-cpp/EmptyService.cpp gen-cpp/EmptyService.h: $(top_srcdir)/test/DebugProtoTest.thrift $(THRIFT) --gen cpp $< +gen-cpp/DoubleConstantsTest_constants.cpp gen-cpp/DoubleConstantsTest_constants.h: $(top_srcdir)/test/DoubleConstantsTest.thrift + $(THRIFT) --gen cpp $< + + gen-cpp/EnumTest_types.cpp gen-cpp/EnumTest_types.h: $(top_srcdir)/test/EnumTest.thrift $(THRIFT) --gen cpp $< @@ -385,6 +408,9 @@ gen-cpp/Service.cpp gen-cpp/StressTest_types.cpp: $(top_srcdir)/test/StressTest. 
gen-cpp/SecondService.cpp gen-cpp/ThriftTest_constants.cpp gen-cpp/ThriftTest.cpp gen-cpp/ThriftTest_types.cpp gen-cpp/ThriftTest_types.h: $(top_srcdir)/test/ThriftTest.thrift $(THRIFT) --gen cpp $< +gen-cpp/OneWayService.cpp gen-cpp/OneWayTest_constants.cpp gen-cpp/OneWayTest_types.h gen-cpp/OneWayService.h gen-cpp/OneWayTest_constants.h gen-cpp/OneWayTest_types.cpp: OneWayTest.thrift + $(THRIFT) --gen cpp $< + gen-cpp/ChildService.cpp gen-cpp/ChildService.h gen-cpp/ParentService.cpp gen-cpp/ParentService.h gen-cpp/proc_types.cpp gen-cpp/proc_types.h: processor/proc.thrift $(THRIFT) --gen cpp:templates,cob_style $< @@ -401,4 +427,5 @@ EXTRA_DIST = \ qt \ CMakeLists.txt \ DebugProtoTest_extras.cpp \ - ThriftTest_extras.cpp + ThriftTest_extras.cpp \ + OneWayTest.thrift diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayHTTPTest.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayHTTPTest.cpp new file mode 100644 index 000000000..3fe63b612 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayHTTPTest.cpp @@ -0,0 +1,242 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "gen-cpp/OneWayService.h" + +BOOST_AUTO_TEST_SUITE(OneWayHTTPTest) + +using namespace apache::thrift; +using apache::thrift::protocol::TProtocol; +using apache::thrift::protocol::TBinaryProtocol; +using apache::thrift::protocol::TBinaryProtocolFactory; +using apache::thrift::protocol::TJSONProtocol; +using apache::thrift::protocol::TJSONProtocolFactory; +using apache::thrift::server::TThreadedServer; +using apache::thrift::server::TServerEventHandler; +using apache::thrift::transport::TTransport; +using apache::thrift::transport::THttpServer; +using apache::thrift::transport::THttpServerTransportFactory; +using apache::thrift::transport::THttpClient; +using apache::thrift::transport::TBufferedTransport; +using apache::thrift::transport::TBufferedTransportFactory; +using apache::thrift::transport::TMemoryBuffer; +using apache::thrift::transport::TServerSocket; +using apache::thrift::transport::TSocket; +using apache::thrift::transport::TTransportException; +using apache::thrift::stdcxx::shared_ptr; +using std::cout; +using std::cerr; +using std::endl; +using std::string; +namespace utf = boost::unit_test; + +// Define this env var to enable some logging (in case you need to debug) +#undef ENABLE_STDERR_LOGGING + +class OneWayServiceHandler : public onewaytest::OneWayServiceIf { +public: + OneWayServiceHandler() {} + + void roundTripRPC() override { +#ifdef ENABLE_STDERR_LOGGING + cerr << "roundTripRPC()" << endl; +#endif + } + void oneWayRPC() { +#ifdef ENABLE_STDERR_LOGGING + cerr << "oneWayRPC()" << std::endl ; +#endif + } +}; + +class OneWayServiceCloneFactory : virtual public onewaytest::OneWayServiceIfFactory { + public: + virtual ~OneWayServiceCloneFactory() {} + virtual onewaytest::OneWayServiceIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) + { + (void)connInfo ; + return new 
OneWayServiceHandler; + } + virtual void releaseHandler( onewaytest::OneWayServiceIf* handler) { + delete handler; + } +}; + +class RPC0ThreadClass { +public: + RPC0ThreadClass(TThreadedServer& server) : server_(server) { } // Constructor +~RPC0ThreadClass() { } // Destructor + +void Run() { + server_.serve() ; +} + TThreadedServer& server_ ; +} ; + +using apache::thrift::concurrency::Monitor; +using apache::thrift::concurrency::Mutex; +using apache::thrift::concurrency::Synchronized; + +// copied from IntegrationTest +class TServerReadyEventHandler : public TServerEventHandler, public Monitor { +public: + TServerReadyEventHandler() : isListening_(false), accepted_(0) {} + virtual ~TServerReadyEventHandler() {} + virtual void preServe() { + Synchronized sync(*this); + isListening_ = true; + notify(); + } + virtual void* createContext(shared_ptr input, + shared_ptr output) { + Synchronized sync(*this); + ++accepted_; + notify(); + + (void)input; + (void)output; + return NULL; + } + bool isListening() const { return isListening_; } + uint64_t acceptedCount() const { return accepted_; } + +private: + bool isListening_; + uint64_t accepted_; +}; + +class TBlockableBufferedTransport : public TBufferedTransport { + public: + TBlockableBufferedTransport(stdcxx::shared_ptr transport) + : TBufferedTransport(transport, 10240), + blocked_(false) { + } + + uint32_t write_buffer_length() { + uint32_t have_bytes = static_cast(wBase_ - wBuf_.get()); + return have_bytes ; + } + + void block() { + blocked_ = true ; +#ifdef ENABLE_STDERR_LOGGING + cerr << "block flushing\n" ; +#endif + } + void unblock() { + blocked_ = false ; +#ifdef ENABLE_STDERR_LOGGING + cerr << "unblock flushing, buffer is\n<<" << std::string((char *)wBuf_.get(), write_buffer_length()) << ">>\n" ; +#endif + } + + void flush() override { + if (blocked_) { +#ifdef ENABLE_STDERR_LOGGING + cerr << "flush was blocked\n" ; +#endif + return ; + } + TBufferedTransport::flush() ; + } + + bool blocked_ ; +} ; + 
+BOOST_AUTO_TEST_CASE( JSON_BufferedHTTP ) +{ + stdcxx::shared_ptr ss = stdcxx::make_shared(0) ; + TThreadedServer server( + stdcxx::make_shared(stdcxx::make_shared()), + ss, //port + stdcxx::make_shared(), + stdcxx::make_shared()); + + stdcxx::shared_ptr pEventHandler(new TServerReadyEventHandler) ; + server.setServerEventHandler(pEventHandler); + +#ifdef ENABLE_STDERR_LOGGING + cerr << "Starting the server...\n"; +#endif + RPC0ThreadClass t(server) ; + boost::thread thread(&RPC0ThreadClass::Run, &t); + + { + Synchronized sync(*(pEventHandler.get())); + while (!pEventHandler->isListening()) { + pEventHandler->wait(); + } + } + + int port = ss->getPort() ; +#ifdef ENABLE_STDERR_LOGGING + cerr << "port " << port << endl ; +#endif + + { + stdcxx::shared_ptr socket(new TSocket("localhost", port)); + socket->setRecvTimeout(10000) ; // 1000msec should be enough + stdcxx::shared_ptr blockable_transport(new TBlockableBufferedTransport(socket)); + stdcxx::shared_ptr transport(new THttpClient(blockable_transport, "localhost", "/service")); + stdcxx::shared_ptr protocol(new TJSONProtocol(transport)); + onewaytest::OneWayServiceClient client(protocol); + + + transport->open(); + client.roundTripRPC(); + blockable_transport->block() ; + uint32_t size0 = blockable_transport->write_buffer_length() ; + client.send_oneWayRPC() ; + uint32_t size1 = blockable_transport->write_buffer_length() ; + client.send_oneWayRPC() ; + uint32_t size2 = blockable_transport->write_buffer_length() ; + BOOST_CHECK((size1 - size0) == (size2 - size1)) ; + blockable_transport->unblock() ; + client.send_roundTripRPC() ; + blockable_transport->flush() ; + try { + client.recv_roundTripRPC() ; + } catch (TTransportException e) { + BOOST_ERROR( "we should not get a transport exception -- this means we failed: " + std::string(e.what()) ) ; + } + transport->close(); + } + server.stop(); + thread.join() ; +#ifdef ENABLE_STDERR_LOGGING + cerr << "finished.\n"; +#endif +} + +BOOST_AUTO_TEST_SUITE_END() diff 
--git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/pre_go17.go b/vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayTest.thrift similarity index 50% rename from vendor/git.apache.org/thrift.git/lib/go/test/tests/pre_go17.go rename to vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayTest.thrift index 8ab433121..127e9ffa3 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/pre_go17.go +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/OneWayTest.thrift @@ -1,5 +1,3 @@ -// +build !go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -17,32 +15,32 @@ * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. + * + * Contains some contributions under the Thrift Software License. + * Please see doc/old-thrift-license.txt in the Thrift distribution for + * details. */ -package tests +namespace c_glib OneWayTest +namespace java onewaytest +namespace cpp onewaytest +namespace rb Onewaytest +namespace perl OneWayTest +namespace csharp Onewaytest +namespace js OneWayTest +namespace st OneWayTest +namespace py OneWayTest +namespace py.twisted OneWayTest +namespace go onewaytest +namespace php OneWayTest +namespace delphi Onewaytest +namespace cocoa OneWayTest +namespace lua OneWayTest +namespace xsd test (uri = 'http://thrift.apache.org/ns/OneWayTest') +namespace netcore ThriftAsync.OneWayTest -import ( - "fmt" - - "golang.org/x/net/context" -) - -var defaultCtx = context.Background() - -type FirstImpl struct{} - -func (f *FirstImpl) ReturnOne(ctx context.Context) (r int64, err error) { - return 1, nil +// a minimal Thrift service, for use in OneWayHTTPTtest.cpp +service OneWayService { + void roundTripRPC(), + oneway void oneWayRPC() } - -type SecondImpl struct{} - -func (s *SecondImpl) ReturnTwo(ctx context.Context) (r int64, err error) { - return 2, nil -} - -type impl struct{} - -func (i 
*impl) Hi(ctx context.Context, in int64, s string) (err error) { fmt.Println("Hi!"); return } -func (i *impl) Emptyfunc(ctx context.Context) (err error) { return } -func (i *impl) EchoInt(ctx context.Context, param int64) (r int64, err error) { return param, nil } diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/RenderedDoubleConstantsTest.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/test/RenderedDoubleConstantsTest.cpp new file mode 100644 index 000000000..0ca042b73 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/RenderedDoubleConstantsTest.cpp @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +#define EPSILON 0.0000001 +#include +#include +#include + +#include "gen-cpp/DoubleConstantsTest_constants.h" +using namespace thrift::test; + +#define BOOST_TEST_MODULE RenderedDoubleConstantsTest +#include + +BOOST_AUTO_TEST_SUITE(RenderedDoubleConstantsTest) + +BOOST_AUTO_TEST_CASE(test_rendered_double_constants) { + const double EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0; + const double EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0; + const double EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0; + const double EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0; + const double EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359; + const double EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1; + const double EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1; + const double EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308; + const double EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43; + const double EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308; + const double EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43; + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, + 
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, EPSILON); + BOOST_CHECK_CLOSE( + g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, EPSILON); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST).hash_code() == + 
typeid(EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST).hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE).hash_code()); + BOOST_CHECK( + typeid(g_DoubleConstantsTest_constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST) + .hash_code() == + typeid(EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE).hash_code()); +} + +BOOST_AUTO_TEST_CASE(test_rendered_double_list) { + const std::vector EXPECTED_DOUBLE_LIST{1.0,-100.0,100.0,9223372036854775807.0,-9223372036854775807.0, + 3.14159265359,1000000.1,-1000000.1,1.7e+308,-1.7e+308,9223372036854775816.43,-9223372036854775816.43}; + BOOST_CHECK_EQUAL(g_DoubleConstantsTest_constants.DOUBLE_LIST_TEST.size(), EXPECTED_DOUBLE_LIST.size()); + for (unsigned int i = 0; i < EXPECTED_DOUBLE_LIST.size(); ++i) { + BOOST_CHECK_CLOSE(g_DoubleConstantsTest_constants.DOUBLE_LIST_TEST[i], EXPECTED_DOUBLE_LIST[i], EPSILON); + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/TNonblockingServerTest.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/test/TNonblockingServerTest.cpp index 
74ffd1d54..63d8a0461 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/test/TNonblockingServerTest.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/TNonblockingServerTest.cpp @@ -178,7 +178,7 @@ private: protected: shared_ptr server; private: - shared_ptr thread; + shared_ptr thread; }; diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/Tests.cpp b/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/Tests.cpp index df5099d8b..fc0ba7f15 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/Tests.cpp +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/Tests.cpp @@ -27,7 +27,7 @@ // The test weight, where 10 is 10 times more threads than baseline // and the baseline is optimized for running in valgrind -static size_t WEIGHT = 10; +static int WEIGHT = 10; int main(int argc, char** argv) { diff --git a/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/TimerManagerTests.h b/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/TimerManagerTests.h index 3779b0d18..1c52c470b 100644 --- a/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/TimerManagerTests.h +++ b/vendor/git.apache.org/thrift.git/lib/cpp/test/concurrency/TimerManagerTests.h @@ -79,14 +79,13 @@ public: = shared_ptr(new TimerManagerTests::Task(_monitor, 10 * timeout)); { - TimerManager timerManager; - timerManager.threadFactory(shared_ptr(new PlatformThreadFactory())); - timerManager.start(); - - assert(timerManager.state() == TimerManager::STARTED); + if (timerManager.state() != TimerManager::STARTED) { + std::cerr << "timerManager is not in the STARTED state, but should be" << std::endl; + return false; + } // Don't create task yet, because its constructor sets the expected completion time, and we // need to delay between inserting the two tasks into the run queue. 
@@ -94,34 +93,27 @@ public: { Synchronized s(_monitor); - timerManager.add(orphanTask, 10 * timeout); - try { - // Wait for 1 second in order to give timerManager a chance to start sleeping in response - // to adding orphanTask. We need to do this so we can verify that adding the second task - // kicks the dispatcher out of the current wait and starts the new 1 second wait. - _monitor.wait(1000); - assert( - 0 == "ERROR: This wait should time out. TimerManager dispatcher may have a problem."); - } catch (TimedOutException&) { - } + THRIFT_SLEEP_USEC(timeout * 1000); task.reset(new TimerManagerTests::Task(_monitor, timeout)); - timerManager.add(task, timeout); - _monitor.wait(); } - assert(task->_done); + if (!task->_done) { + std::cerr << "task is not done, but it should have executed" << std::endl; + return false; + } std::cout << "\t\t\t" << (task->_success ? "Success" : "Failure") << "!" << std::endl; } - // timerManager.stop(); This is where it happens via destructor - - assert(!orphanTask->_done); + if (orphanTask->_done) { + std::cerr << "orphan task is done, but it should not have executed" << std::endl; + return false; + } return true; } diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj b/vendor/git.apache.org/thrift.git/lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj index 1ee29d16e..ae8608159 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj +++ b/vendor/git.apache.org/thrift.git/lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj @@ -45,7 +45,7 @@ false true 0 - 0.11.0.%2a + 1.0.0.%2a false false true diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/src/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/lib/csharp/src/Properties/AssemblyInfo.cs index ab6921e6b..dcbe74738 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/src/Properties/AssemblyInfo.cs +++ 
b/vendor/git.apache.org/thrift.git/lib/csharp/src/Properties/AssemblyInfo.cs @@ -51,5 +51,5 @@ using System.Runtime.InteropServices; // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: -[assembly: AssemblyVersion("0.11.0.1")] -[assembly: AssemblyFileVersion("0.11.0.1")] +[assembly: AssemblyVersion("1.0.0.1")] +[assembly: AssemblyFileVersion("1.0.0.1")] diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/src/Protocol/TJSONProtocol.cs b/vendor/git.apache.org/thrift.git/lib/csharp/src/Protocol/TJSONProtocol.cs index 5e6589e0b..0ceb675d7 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/src/Protocol/TJSONProtocol.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/src/Protocol/TJSONProtocol.cs @@ -544,11 +544,6 @@ namespace Thrift.Protocol int len = b.Length; int off = 0; - // Ignore padding - int bound = len >= 2 ? len - 2 : 0; - for (int i = len - 1; i >= bound && b[i] == '='; --i) { - --len; - } while (len >= 3) { // Encode 3 bytes at a time diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadPoolServer.cs b/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadPoolServer.cs index b7346b8a3..ec283dacf 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadPoolServer.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadPoolServer.cs @@ -212,60 +212,78 @@ namespace Thrift.Server /// private void Execute(Object threadContext) { - TTransport client = (TTransport)threadContext; - TProcessor processor = processorFactory.GetProcessor(client, this); - TTransport inputTransport = null; - TTransport outputTransport = null; - TProtocol inputProtocol = null; - TProtocol outputProtocol = null; - Object connectionContext = null; - try + using( TTransport client = (TTransport)threadContext) { - inputTransport = inputTransportFactory.GetTransport(client); - outputTransport = outputTransportFactory.GetTransport(client); - 
inputProtocol = inputProtocolFactory.GetProtocol(inputTransport); - outputProtocol = outputProtocolFactory.GetProtocol(outputTransport); - - //Recover event handler (if any) and fire createContext server event when a client connects - if (serverEventHandler != null) - connectionContext = serverEventHandler.createContext(inputProtocol, outputProtocol); - - //Process client requests until client disconnects - while (!stop) + TProcessor processor = processorFactory.GetProcessor(client, this); + TTransport inputTransport = null; + TTransport outputTransport = null; + TProtocol inputProtocol = null; + TProtocol outputProtocol = null; + Object connectionContext = null; + try { - if (!inputTransport.Peek()) - break; - - //Fire processContext server event - //N.B. This is the pattern implemented in C++ and the event fires provisionally. - //That is to say it may be many minutes between the event firing and the client request - //actually arriving or the client may hang up without ever makeing a request. + try + { + inputTransport = inputTransportFactory.GetTransport(client); + outputTransport = outputTransportFactory.GetTransport(client); + inputProtocol = inputProtocolFactory.GetProtocol(inputTransport); + outputProtocol = outputProtocolFactory.GetProtocol(outputTransport); + + //Recover event handler (if any) and fire createContext server event when a client connects + if (serverEventHandler != null) + connectionContext = serverEventHandler.createContext(inputProtocol, outputProtocol); + + //Process client requests until client disconnects + while (!stop) + { + if (!inputTransport.Peek()) + break; + + //Fire processContext server event + //N.B. This is the pattern implemented in C++ and the event fires provisionally. + //That is to say it may be many minutes between the event firing and the client request + //actually arriving or the client may hang up without ever makeing a request. 
+ if (serverEventHandler != null) + serverEventHandler.processContext(connectionContext, inputTransport); + //Process client request (blocks until transport is readable) + if (!processor.Process(inputProtocol, outputProtocol)) + break; + } + } + catch (TTransportException) + { + //Usually a client disconnect, expected + } + catch (Exception x) + { + //Unexpected + logDelegate("Error: " + x); + } + + //Fire deleteContext server event after client disconnects if (serverEventHandler != null) - serverEventHandler.processContext(connectionContext, inputTransport); - //Process client request (blocks until transport is readable) - if (!processor.Process(inputProtocol, outputProtocol)) - break; + serverEventHandler.deleteContext(connectionContext, inputProtocol, outputProtocol); + + } + finally + { + //Close transports + if (inputTransport != null) + inputTransport.Close(); + if (outputTransport != null) + outputTransport.Close(); + + // disposable stuff should be disposed + if( inputProtocol != null) + inputProtocol.Dispose(); + if( outputProtocol != null) + outputProtocol.Dispose(); + if( inputTransport != null) + inputTransport.Dispose(); + if( outputTransport != null) + outputTransport.Dispose(); } } - catch (TTransportException) - { - //Usually a client disconnect, expected - } - catch (Exception x) - { - //Unexpected - logDelegate("Error: " + x); - } - - //Fire deleteContext server event after client disconnects - if (serverEventHandler != null) - serverEventHandler.deleteContext(connectionContext, inputProtocol, outputProtocol); - - //Close transports - if (inputTransport != null) - inputTransport.Close(); - if (outputTransport != null) - outputTransport.Close(); } public override void Stop() diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadedServer.cs b/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadedServer.cs index fe13dfd5b..3a1734aca 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadedServer.cs 
+++ b/vendor/git.apache.org/thrift.git/lib/csharp/src/Server/TThreadedServer.cs @@ -192,32 +192,33 @@ namespace Thrift.Server private void ClientWorker(Object context) { - TTransport client = (TTransport)context; - TProcessor processor = processorFactory.GetProcessor(client); - TTransport inputTransport = null; - TTransport outputTransport = null; - TProtocol inputProtocol = null; - TProtocol outputProtocol = null; - Object connectionContext = null; - try - { - using (inputTransport = inputTransportFactory.GetTransport(client)) + using( TTransport client = (TTransport)context) + { + TProcessor processor = processorFactory.GetProcessor(client); + TTransport inputTransport = null; + TTransport outputTransport = null; + TProtocol inputProtocol = null; + TProtocol outputProtocol = null; + Object connectionContext = null; + try { - using (outputTransport = outputTransportFactory.GetTransport(client)) + try { + inputTransport = inputTransportFactory.GetTransport(client); + outputTransport = outputTransportFactory.GetTransport(client); inputProtocol = inputProtocolFactory.GetProtocol(inputTransport); outputProtocol = outputProtocolFactory.GetProtocol(outputTransport); - + //Recover event handler (if any) and fire createContext server event when a client connects if (serverEventHandler != null) connectionContext = serverEventHandler.createContext(inputProtocol, outputProtocol); - + //Process client requests until client disconnects while (!stop) { if (!inputTransport.Peek()) break; - + //Fire processContext server event //N.B. This is the pattern implemented in C++ and the event fires provisionally. 
//That is to say it may be many minutes between the event firing and the client request @@ -229,28 +230,42 @@ namespace Thrift.Server break; } } + catch (TTransportException) + { + //Usually a client disconnect, expected + } + catch (Exception x) + { + //Unexpected + logDelegate("Error: " + x); + } + + //Fire deleteContext server event after client disconnects + if (serverEventHandler != null) + serverEventHandler.deleteContext(connectionContext, inputProtocol, outputProtocol); + + lock (clientLock) + { + clientThreads.Remove(Thread.CurrentThread); + Monitor.Pulse(clientLock); + } + + } + finally + { + //Close transports + if (inputTransport != null) + inputTransport.Close(); + if (outputTransport != null) + outputTransport.Close(); + + // disposable stuff should be disposed + if (inputProtocol != null) + inputProtocol.Dispose(); + if (outputProtocol != null) + outputProtocol.Dispose(); } } - catch (TTransportException) - { - //Usually a client disconnect, expected - } - catch (Exception x) - { - //Unexpected - logDelegate("Error: " + x); - } - - //Fire deleteContext server event after client disconnects - if (serverEventHandler != null) - serverEventHandler.deleteContext(connectionContext, inputProtocol, outputProtocol); - - lock (clientLock) - { - clientThreads.Remove(Thread.CurrentThread); - Monitor.Pulse(clientLock); - } - return; } public override void Stop() diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/src/TApplicationException.cs b/vendor/git.apache.org/thrift.git/lib/csharp/src/TApplicationException.cs index 4c0d3a32e..409aa4dd9 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/src/TApplicationException.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/src/TApplicationException.cs @@ -137,5 +137,10 @@ namespace Thrift InvalidProtocol, UnsupportedClientType } + + public ExceptionType Type + { + get { return type; } + } } } diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/src/Thrift.csproj 
b/vendor/git.apache.org/thrift.git/lib/csharp/src/Thrift.csproj index 9040ca590..83bc4f7d0 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/src/Thrift.csproj +++ b/vendor/git.apache.org/thrift.git/lib/csharp/src/Thrift.csproj @@ -45,7 +45,7 @@ false true 0 - 0.11.0.%2a + 1.0.0.%2a false false true diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/test/JSON/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/lib/csharp/test/JSON/Properties/AssemblyInfo.cs index fe0c46b09..fdff4a1a5 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/test/JSON/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/test/JSON/Properties/AssemblyInfo.cs @@ -51,5 +51,5 @@ using System.Runtime.InteropServices; // Sie können alle Werte angeben oder die standardmäßigen Build- und Revisionsnummern // übernehmen, indem Sie "*" eingeben: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/MultiplexClient.csproj b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/MultiplexClient.csproj index ed9616bb0..6221e1418 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/MultiplexClient.csproj +++ b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/MultiplexClient.csproj @@ -46,7 +46,7 @@ false true 0 - 0.11.0.%2a + 1.0.0.%2a false true diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/Properties/AssemblyInfo.cs index e2d433e75..f686ded5c 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Client/Properties/AssemblyInfo.cs 
@@ -51,5 +51,5 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/MultiplexServer.csproj b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/MultiplexServer.csproj index e0ca45c9b..dc1d123e4 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/MultiplexServer.csproj +++ b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/MultiplexServer.csproj @@ -46,7 +46,7 @@ false true 0 - 0.11.0.%2a + 1.0.0.%2a false true diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/Properties/AssemblyInfo.cs index 8f63d65f2..5d405466a 100644 --- a/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/test/Multiplex/Server/Properties/AssemblyInfo.cs @@ -51,5 +51,5 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/lib/csharp/test/ThriftMVCTest/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/lib/csharp/test/ThriftMVCTest/Properties/AssemblyInfo.cs index 8acb1c23c..1b1b833d9 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/csharp/test/ThriftMVCTest/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/lib/csharp/test/ThriftMVCTest/Properties/AssemblyInfo.cs @@ -49,5 +49,5 @@ using System.Runtime.InteropServices; // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: -[assembly: AssemblyVersion("0.11.0.1")] -[assembly: AssemblyFileVersion("0.11.0.1")] +[assembly: AssemblyVersion("1.0.0.1")] +[assembly: AssemblyFileVersion("1.0.0.1")] diff --git a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/base.d b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/base.d index 8aea53613..38034a7b8 100644 --- a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/base.d +++ b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/base.d @@ -50,7 +50,7 @@ class TCompoundOperationException : TException { /// The Thrift version string, used for informative purposes. // Note: This is currently hardcoded, but will likely be filled in by the build // system in future versions. -enum VERSION = "0.11.0"; +enum VERSION = "1.0.0 dev"; /** * Functions used for logging inside Thrift. 
diff --git a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/simple.d b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/simple.d index f7183a751..5aba4c169 100644 --- a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/simple.d +++ b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/simple.d @@ -140,7 +140,9 @@ class TSimpleServer : TServer { } } } catch (TTransportException ttx) { - logError("Client died: %s", ttx); + if (ttx.type() != TTransportException.Type.END_OF_FILE) { + logError("Client died unexpectedly: %s", ttx); + } } catch (Exception e) { logError("Uncaught exception: %s", e); } diff --git a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/taskpool.d b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/taskpool.d index b4720a48e..670e720fc 100644 --- a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/taskpool.d +++ b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/taskpool.d @@ -268,7 +268,9 @@ protected: } } } catch (TTransportException ttx) { - logError("Client died: %s", ttx); + if (ttx.type() != TTransportException.Type.END_OF_FILE) { + logError("Client died unexpectedly: %s", ttx); + } } catch (Exception e) { logError("Uncaught exception: %s", e); } diff --git a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/threaded.d b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/threaded.d index 1cde983a4..300cc8457 100644 --- a/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/threaded.d +++ b/vendor/git.apache.org/thrift.git/lib/d/src/thrift/server/threaded.d @@ -173,7 +173,9 @@ private class WorkerThread : Thread { } } } catch (TTransportException ttx) { - logError("Client died: %s", ttx); + if (ttx.type() != TTransportException.Type.END_OF_FILE) { + logError("Client died unexpectedly: %s", ttx); + } } catch (Exception e) { logError("Uncaught exception: %s", e); } diff --git a/vendor/git.apache.org/thrift.git/lib/d/test/thrift_test_server.d 
b/vendor/git.apache.org/thrift.git/lib/d/test/thrift_test_server.d index 71ab9175d..b582253c7 100644 --- a/vendor/git.apache.org/thrift.git/lib/d/test/thrift_test_server.d +++ b/vendor/git.apache.org/thrift.git/lib/d/test/thrift_test_server.d @@ -16,8 +16,11 @@ * specific language governing permissions and limitations * under the License. */ + module thrift_test_server; +import core.stdc.errno : errno; +import core.stdc.signal : signal, sigfn_t, SIGINT, SIG_DFL, SIG_ERR; import core.thread : dur, Thread; import std.algorithm; import std.exception : enforce; @@ -40,6 +43,7 @@ import thrift.transport.buffered; import thrift.transport.framed; import thrift.transport.http; import thrift.transport.ssl; +import thrift.util.cancellation; import thrift.util.hashset; import test_utils; @@ -205,14 +209,44 @@ private: bool trace_; } +shared(bool) gShutdown = false; + +nothrow @nogc extern(C) void handleSignal(int sig) { + gShutdown = true; +} + +// Runs a thread that waits for shutdown to be +// signaled and then triggers cancellation, +// causing the server to stop. While we could +// use a signalfd for this purpose, we are instead +// opting for a busy waiting scheme for maximum +// portability since signalfd is a linux thing. 
+ +class ShutdownThread : Thread { + this(TCancellationOrigin cancellation) { + cancellation_ = cancellation; + super(&run); + } + +private: + void run() { + while (!gShutdown) { + Thread.sleep(dur!("msecs")(25)); + } + cancellation_.trigger(); + } + + TCancellationOrigin cancellation_; +} + void main(string[] args) { ushort port = 9090; ServerType serverType; ProtocolType protocolType; size_t numIOThreads = 1; TransportType transportType; - bool ssl; - bool trace; + bool ssl = false; + bool trace = true; size_t taskPoolSize = totalCPUs; getopt(args, "port", &port, "protocol", &protocolType, "server-type", @@ -279,8 +313,26 @@ void main(string[] args) { auto server = createServer(serverType, numIOThreads, taskPoolSize, processor, serverSocket, transportFactory, protocolFactory); + // Set up SIGINT signal handling + sigfn_t oldHandler = signal(SIGINT, &handleSignal); + enforce(oldHandler != SIG_ERR, + "Could not replace the SIGINT signal handler: errno {0}".format(errno())); + + // Set up a server cancellation trigger + auto cancel = new TCancellationOrigin(); + + // Set up a listener for the shutdown condition - this will + // wake up when the signal occurs and trigger cancellation. + auto shutdown = new ShutdownThread(cancel); + shutdown.start(); + + // Serve from this thread; the signal will stop the server + // and control will return here writefln("Starting %s/%s %s ThriftTest server %son port %s...", protocolType, transportType, serverType, ssl ? 
"(using SSL) ": "", port); - server.serve(); + server.serve(cancel); + shutdown.join(); + signal(SIGINT, SIG_DFL); + writeln("done."); } diff --git a/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_framed_transport.dart b/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_framed_transport.dart index 80ccf2c55..2ef03f7f8 100644 --- a/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_framed_transport.dart +++ b/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_framed_transport.dart @@ -25,7 +25,14 @@ class TFramedTransport extends TBufferedTransport { final TTransport _transport; - final Uint8List headerBytes = new Uint8List(headerByteCount); + final Uint8List _headerBytes = new Uint8List(headerByteCount); + int _receivedHeaderBytes = 0; + + int _bodySize = 0; + Uint8List _body = null; + int _receivedBodyBytes = 0; + + Completer _frameCompleter = null; TFramedTransport(TTransport transport) : _transport = transport { if (transport == null) { @@ -51,33 +58,112 @@ class TFramedTransport extends TBufferedTransport { if (got > 0) return got; } - _readFrame(); + // IMPORTANT: by the time you've got here, + // an entire frame is available for reading return super.read(buffer, offset, length); } void _readFrame() { - _transport.readAll(headerBytes, 0, headerByteCount); - int size = headerBytes.buffer.asByteData().getUint32(0); - - if (size < 0) { - throw new TTransportError( - TTransportErrorType.UNKNOWN, "Read a negative frame size: $size"); + if (_body == null) { + bool gotFullHeader = _readFrameHeader(); + if (!gotFullHeader) { + return; + } } - Uint8List buffer = new Uint8List(size); - _transport.readAll(buffer, 0, size); - _setReadBuffer(buffer); + _readFrameBody(); + } + + bool _readFrameHeader() { + var remainingHeaderBytes = headerByteCount - _receivedHeaderBytes; + + int got = _transport.read(_headerBytes, _receivedHeaderBytes, remainingHeaderBytes); + if (got < 0) { + throw new TTransportError( + 
TTransportErrorType.UNKNOWN, "Socket closed during frame header read"); + } + + _receivedHeaderBytes += got; + + if (_receivedHeaderBytes == headerByteCount) { + int size = _headerBytes.buffer.asByteData().getUint32(0); + + _receivedHeaderBytes = 0; + + if (size < 0) { + throw new TTransportError( + TTransportErrorType.UNKNOWN, "Read a negative frame size: $size"); + } + + _bodySize = size; + _body = new Uint8List(_bodySize); + _receivedBodyBytes = 0; + + return true; + } else { + _registerForReadableBytes(); + return false; + } + } + + void _readFrameBody() { + var remainingBodyBytes = _bodySize - _receivedBodyBytes; + + int got = _transport.read(_body, _receivedBodyBytes, remainingBodyBytes); + if (got < 0) { + throw new TTransportError( + TTransportErrorType.UNKNOWN, "Socket closed during frame body read"); + } + + _receivedBodyBytes += got; + + if (_receivedBodyBytes == _bodySize) { + var body = _body; + + _bodySize = 0; + _body = null; + _receivedBodyBytes = 0; + + _setReadBuffer(body); + + var completer = _frameCompleter; + _frameCompleter = null; + completer.complete(new Uint8List(0)); + } else { + _registerForReadableBytes(); + } } Future flush() { - Uint8List buffer = consumeWriteBuffer(); - int length = buffer.length; + if (_frameCompleter == null) { + Uint8List buffer = consumeWriteBuffer(); + int length = buffer.length; - headerBytes.buffer.asByteData().setUint32(0, length); - _transport.write(headerBytes, 0, headerByteCount); - _transport.write(buffer, 0, length); + _headerBytes.buffer.asByteData().setUint32(0, length); + _transport.write(_headerBytes, 0, headerByteCount); + _transport.write(buffer, 0, length); - return _transport.flush(); + _frameCompleter = new Completer(); + _registerForReadableBytes(); + } + + return _frameCompleter.future; + } + + void _registerForReadableBytes() { + _transport.flush().then((_) { + _readFrame(); + }).catchError((e) { + var completer = _frameCompleter; + + _receivedHeaderBytes = 0; + _bodySize = 0; + _body = null; 
+ _receivedBodyBytes = 0; + _frameCompleter = null; + + completer.completeError(e); + }); } } diff --git a/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_socket_transport.dart b/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_socket_transport.dart index 8dcdfdefc..c41374aff 100644 --- a/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_socket_transport.dart +++ b/vendor/git.apache.org/thrift.git/lib/dart/lib/src/transport/t_socket_transport.dart @@ -79,7 +79,9 @@ class TClientSocketTransport extends TSocketTransport { var completer = new Completer.sync(); _completers.add(completer); - socket.send(bytes); + if (bytes.lengthInBytes > 0) { + socket.send(bytes); + } return completer.future; } diff --git a/vendor/git.apache.org/thrift.git/lib/dart/pubspec.yaml b/vendor/git.apache.org/thrift.git/lib/dart/pubspec.yaml index 5d04226b4..f7aa8768a 100644 --- a/vendor/git.apache.org/thrift.git/lib/dart/pubspec.yaml +++ b/vendor/git.apache.org/thrift.git/lib/dart/pubspec.yaml @@ -16,7 +16,7 @@ # under the License. name: thrift -version: 0.11.0 +version: 1.0.0-dev description: > A Dart library for Apache Thrift author: Apache Thrift Developers @@ -33,7 +33,7 @@ dependencies: dev_dependencies: # test - mockito: ^0.11.0 + mockito: ^1.0.0 test: ^0.12.0 # dart_dev - https://github.com/Workiva/dart_dev diff --git a/vendor/git.apache.org/thrift.git/lib/dart/test/transport/t_framed_transport_test.dart b/vendor/git.apache.org/thrift.git/lib/dart/test/transport/t_framed_transport_test.dart new file mode 100644 index 000000000..e072e6877 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/dart/test/transport/t_framed_transport_test.dart @@ -0,0 +1,175 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +library thrift.test.transport.t_framed_transport_test; + +import 'dart:async'; +import 'dart:convert'; +import 'dart:typed_data' show Uint8List; + +import 'package:test/test.dart'; +import 'package:thrift/thrift.dart'; + +void main() { + group('TFramedTransport partial reads', () { + final flushAwaitDuration = new Duration(seconds: 10); + + FakeReadOnlySocket socket; + TSocketTransport socketTransport; + TFramedTransport transport; + var messageAvailable; + + setUp(() { + socket = new FakeReadOnlySocket(); + socketTransport = new TClientSocketTransport(socket); + transport = new TFramedTransport(socketTransport); + messageAvailable = false; + }); + + expectNoReadableBytes() { + var readBuffer = new Uint8List(128); + var readBytes = transport.read(readBuffer, 0, readBuffer.lengthInBytes); + expect(readBytes, 0); + expect(messageAvailable, false); + } + + test('Test transport reads messages where header and body are sent separately', () async { + // buffer into which we'll read + var readBuffer = new Uint8List(10); + var readBytes; + + // registers for readable bytes + var flushFuture = transport.flush().timeout(flushAwaitDuration); + flushFuture.then((_) { + messageAvailable = true; + }); + + // write header bytes + socket.messageController.add(new Uint8List.fromList([0x00, 0x00, 0x00, 0x06])); + + // you shouldn't be able to get any bytes from the read, + // because the header has 
been consumed internally + expectNoReadableBytes(); + + // write first batch of body + socket.messageController.add(new Uint8List.fromList(UTF8.encode("He"))); + + // you shouldn't be able to get any bytes from the read, + // because the frame has been consumed internally + expectNoReadableBytes(); + + // write second batch of body + socket.messageController.add(new Uint8List.fromList(UTF8.encode("llo!"))); + + // have to wait for the flush to complete, + // because it's only then that the frame is available for reading + await flushFuture; + expect(messageAvailable, true); + + // at this point the frame is complete, so we expect the read to complete + readBytes = transport.read(readBuffer, 0, readBuffer.lengthInBytes); + expect(readBytes, 6); + expect(readBuffer.sublist(0, 6), UTF8.encode("Hello!")); + }); + + test('Test transport reads messages where header is sent in pieces ' + 'and body is also sent in pieces', () async { + // buffer into which we'll read + var readBuffer = new Uint8List(10); + var readBytes; + + // registers for readable bytes + var flushFuture = transport.flush().timeout(flushAwaitDuration); + flushFuture.then((_) { + messageAvailable = true; + }); + + // write first part of header bytes + socket.messageController.add(new Uint8List.fromList([0x00, 0x00])); + + // you shouldn't be able to get any bytes from the read + expectNoReadableBytes(); + + // write second part of header bytes + socket.messageController.add(new Uint8List.fromList([0x00, 0x03])); + + // you shouldn't be able to get any bytes from the read again + // because only the header was read, and there's no frame body + readBytes = expectNoReadableBytes(); + + // write first batch of body + socket.messageController.add(new Uint8List.fromList(UTF8.encode("H"))); + + // you shouldn't be able to get any bytes from the read, + // because the frame has been consumed internally + expectNoReadableBytes(); + + // write second batch of body + socket.messageController.add(new 
Uint8List.fromList(UTF8.encode("i!"))); + + // have to wait for the flush to complete, + // because it's only then that the frame is available for reading + await flushFuture; + expect(messageAvailable, true); + + // at this point the frame is complete, so we expect the read to complete + readBytes = transport.read(readBuffer, 0, readBuffer.lengthInBytes); + expect(readBytes, 3); + expect(readBuffer.sublist(0, 3), UTF8.encode("Hi!")); + }); + }); +} + + + +class FakeReadOnlySocket extends TSocket { + + StreamController messageController = new StreamController(sync: true); + StreamController errorController = new StreamController(); + StreamController stateController = new StreamController(); + + @override + Future close() { + // noop + } + + @override + bool get isClosed => false; + + @override + bool get isOpen => true; + + @override + Stream get onError => errorController.stream; + + @override + Stream get onMessage => messageController.stream; + + @override + Stream get onState => stateController.stream; + + @override + Future open() { + // noop + } + + @override + void send(Uint8List data) { + // noop + } +} + diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Processor.Multiplex.pas b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Processor.Multiplex.pas index 4cd80ba8e..8cf23db07 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Processor.Multiplex.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Processor.Multiplex.pas @@ -53,11 +53,11 @@ uses type IMultiplexedProcessor = interface( IProcessor) - ['{810FF32D-22A2-4D58-B129-B0590703ECEC}'] + ['{807F9D19-6CF4-4789-840E-93E87A12EB63}'] // Register a service with this TMultiplexedProcessor. This allows us // to broker requests to individual services by using the service name // to select them at request time. 
- procedure RegisterProcessor( const serviceName : String; const processor : IProcessor); + procedure RegisterProcessor( const serviceName : String; const processor : IProcessor; const asDefault : Boolean = FALSE); end; @@ -76,6 +76,7 @@ type private FServiceProcessorMap : TDictionary; + FDefaultProcessor : IProcessor; procedure Error( const oprot : IProtocol; const msg : TThriftMessage; extype : TApplicationExceptionSpecializedClass; const etxt : string); @@ -87,7 +88,7 @@ type // Register a service with this TMultiplexedProcessorImpl. This allows us // to broker requests to individual services by using the service name // to select them at request time. - procedure RegisterProcessor( const serviceName : String; const processor : IProcessor); + procedure RegisterProcessor( const serviceName : String; const processor : IProcessor; const asDefault : Boolean = FALSE); { This implementation of process performs the following steps: - Read the beginning of the message. @@ -135,9 +136,15 @@ begin end; -procedure TMultiplexedProcessorImpl.RegisterProcessor( const serviceName : String; const processor : IProcessor); +procedure TMultiplexedProcessorImpl.RegisterProcessor( const serviceName : String; const processor : IProcessor; const asDefault : Boolean); begin FServiceProcessorMap.Add( serviceName, processor); + + if asDefault then begin + if FDefaultProcessor = nil + then FDefaultProcessor := processor + else raise TApplicationExceptionInternalError.Create('Only one default service allowed'); + end; end; @@ -184,28 +191,37 @@ begin end; // Extract the service name + // use FDefaultProcessor as fallback if there is no separator idx := Pos( TMultiplexedProtocol.SEPARATOR, msg.Name); - if idx < 1 then begin + if idx > 0 then begin + + // Create a new TMessage, something that can be consumed by any TProtocol + sService := Copy( msg.Name, 1, idx-1); + if not FServiceProcessorMap.TryGetValue( sService, processor) + then begin + Error( oprot, msg, + 
TApplicationExceptionInternalError, + Format(ERROR_UNKNOWN_SERVICE,[sService])); + Exit( FALSE); + end; + + // Create a new TMessage, removing the service name + Inc( idx, Length(TMultiplexedProtocol.SEPARATOR)); + Init( newMsg, Copy( msg.Name, idx, MAXINT), msg.Type_, msg.SeqID); + + end + else if FDefaultProcessor <> nil then begin + processor := FDefaultProcessor; + newMsg := msg; // no need to change + + end + else begin Error( oprot, msg, TApplicationExceptionInvalidProtocol, Format(ERROR_INCOMPATIBLE_PROT,[msg.Name])); Exit( FALSE); end; - // Create a new TMessage, something that can be consumed by any TProtocol - sService := Copy( msg.Name, 1, idx-1); - if not FServiceProcessorMap.TryGetValue( sService, processor) - then begin - Error( oprot, msg, - TApplicationExceptionInternalError, - Format(ERROR_UNKNOWN_SERVICE,[sService])); - Exit( FALSE); - end; - - // Create a new TMessage, removing the service name - Inc( idx, Length(TMultiplexedProtocol.SEPARATOR)); - Init( newMsg, Copy( msg.Name, idx, MAXINT), msg.Type_, msg.SeqID); - // Dispatch processing to the stored processor protocol := TStoredMessageProtocol.Create( iprot, newMsg); result := processor.process( protocol, oprot, events); @@ -213,4 +229,3 @@ end; end. 
- diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Stream.pas b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Stream.pas index b6e0cbfe9..3308c53a5 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Stream.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Stream.pas @@ -132,6 +132,7 @@ begin end; function TThriftStreamAdapterCOM.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; +var pTmp : PByte; begin inherited; @@ -141,7 +142,9 @@ begin Result := 0; if FStream <> nil then begin if count > 0 then begin - FStream.Read( @(PByteArray(pBuf)^[offset]), count, @Result); + pTmp := pBuf; + Inc( pTmp, offset); + FStream.Read( pTmp, count, @Result); end; end; end; @@ -172,11 +175,14 @@ end; procedure TThriftStreamAdapterCOM.Write( const pBuf: Pointer; offset: Integer; count: Integer); var nWritten : Integer; + pTmp : PByte; begin inherited; if IsOpen then begin if count > 0 then begin - FStream.Write( @(PByteArray(pBuf)^[offset]), count, @nWritten); + pTmp := pBuf; + Inc( pTmp, offset); + FStream.Write( pTmp, count, @nWritten); end; end; end; @@ -259,14 +265,18 @@ begin end; function TThriftStreamAdapterDelphi.Read(const pBuf : Pointer; const buflen : Integer; offset, count: Integer): Integer; +var pTmp : PByte; begin inherited; if count >= buflen-offset then count := buflen-offset; - if count > 0 - then Result := FStream.Read( PByteArray(pBuf)^[offset], count) + if count > 0 then begin + pTmp := pBuf; + Inc( pTmp, offset); + Result := FStream.Read( pTmp^, count) + end else Result := 0; end; @@ -296,10 +306,13 @@ begin end; procedure TThriftStreamAdapterDelphi.Write(const pBuf : Pointer; offset, count: Integer); +var pTmp : PByte; begin inherited; if count > 0 then begin - FStream.Write( PByteArray(pBuf)^[offset], count) + pTmp := pBuf; + Inc( pTmp, offset); + FStream.Write( pTmp^, count) end; end; diff --git 
a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.Pipes.pas b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.Pipes.pas index aace4bb21..77a343b0c 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.Pipes.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.Pipes.pas @@ -328,6 +328,7 @@ end; procedure TPipeStreamBase.WriteDirect( const pBuf : Pointer; offset: Integer; count: Integer); var cbWritten, nBytes : DWORD; + pData : PByte; begin if not IsOpen then raise TTransportExceptionNotOpen.Create('Called write on non-open pipe'); @@ -336,11 +337,13 @@ begin // there's a system limit around 0x10000 bytes that we hit otherwise // MSDN: "Pipe write operations across a network are limited to 65,535 bytes per write. For more information regarding pipes, see the Remarks section." nBytes := Min( 15*4096, count); // 16 would exceed the limit + pData := pBuf; + Inc( pData, offset); while nBytes > 0 do begin - if not WriteFile( FPipe, PByteArray(pBuf)^[offset], nBytes, cbWritten, nil) + if not WriteFile( FPipe, pData^, nBytes, cbWritten, nil) then raise TTransportExceptionNotOpen.Create('Write to pipe failed'); - Inc( offset, cbWritten); + Inc( pData, cbWritten); Dec( count, cbWritten); nBytes := Min( nBytes, count); end; @@ -350,6 +353,7 @@ end; procedure TPipeStreamBase.WriteOverlapped( const pBuf : Pointer; offset: Integer; count: Integer); var cbWritten, dwWait, dwError, nBytes : DWORD; overlapped : IOverlappedHelper; + pData : PByte; begin if not IsOpen then raise TTransportExceptionNotOpen.Create('Called write on non-open pipe'); @@ -358,17 +362,21 @@ begin // there's a system limit around 0x10000 bytes that we hit otherwise // MSDN: "Pipe write operations across a network are limited to 65,535 bytes per write. For more information regarding pipes, see the Remarks section." 
nBytes := Min( 15*4096, count); // 16 would exceed the limit + pData := pBuf; + Inc( pData, offset); while nBytes > 0 do begin overlapped := TOverlappedHelperImpl.Create; - if not WriteFile( FPipe, PByteArray(pBuf)^[offset], nBytes, cbWritten, overlapped.OverlappedPtr) + if not WriteFile( FPipe, pData^, nBytes, cbWritten, overlapped.OverlappedPtr) then begin dwError := GetLastError; case dwError of ERROR_IO_PENDING : begin dwWait := overlapped.WaitFor(FTimeout); - if (dwWait = WAIT_TIMEOUT) - then raise TTransportExceptionTimedOut.Create('Pipe write timed out'); + if (dwWait = WAIT_TIMEOUT) then begin + CancelIo( FPipe); // prevents possible AV on invalid overlapped ptr + raise TTransportExceptionTimedOut.Create('Pipe write timed out'); + end; if (dwWait <> WAIT_OBJECT_0) or not GetOverlappedResult( FPipe, overlapped.Overlapped, cbWritten, TRUE) @@ -382,7 +390,7 @@ begin ASSERT( DWORD(nBytes) = cbWritten); - Inc( offset, cbWritten); + Inc( pData, cbWritten); Dec( count, cbWritten); nBytes := Min( nBytes, count); end; @@ -393,6 +401,7 @@ function TPipeStreamBase.ReadDirect( const pBuf : Pointer; const buflen : In var cbRead, dwErr, nRemaining : DWORD; bytes, retries : LongInt; bOk : Boolean; + pData : PByte; const INTERVAL = 10; // ms begin if not IsOpen @@ -427,14 +436,16 @@ begin result := 0; nRemaining := count; + pData := pBuf; + Inc( pData, offset); while nRemaining > 0 do begin // read the data (or block INFINITE-ly) - bOk := ReadFile( FPipe, PByteArray(pBuf)^[offset], nRemaining, cbRead, nil); + bOk := ReadFile( FPipe, pData^, nRemaining, cbRead, nil); if (not bOk) and (GetLastError() <> ERROR_MORE_DATA) then Break; // No more data, possibly because client disconnected. 
Dec( nRemaining, cbRead); - Inc( offset, cbRead); + Inc( pData, cbRead); Inc( result, cbRead); end; end; @@ -444,25 +455,30 @@ function TPipeStreamBase.ReadOverlapped( const pBuf : Pointer; const buflen : In var cbRead, dwWait, dwError, nRemaining : DWORD; bOk : Boolean; overlapped : IOverlappedHelper; + pData : PByte; begin if not IsOpen then raise TTransportExceptionNotOpen.Create('Called read on non-open pipe'); result := 0; nRemaining := count; + pData := pBuf; + Inc( pData, offset); while nRemaining > 0 do begin overlapped := TOverlappedHelperImpl.Create; // read the data - bOk := ReadFile( FPipe, PByteArray(pBuf)^[offset], nRemaining, cbRead, overlapped.OverlappedPtr); + bOk := ReadFile( FPipe, pData^, nRemaining, cbRead, overlapped.OverlappedPtr); if not bOk then begin dwError := GetLastError; case dwError of ERROR_IO_PENDING : begin dwWait := overlapped.WaitFor(FTimeout); - if (dwWait = WAIT_TIMEOUT) - then raise TTransportExceptionTimedOut.Create('Pipe read timed out'); + if (dwWait = WAIT_TIMEOUT) then begin + CancelIo( FPipe); // prevents possible AV on invalid overlapped ptr + raise TTransportExceptionTimedOut.Create('Pipe read timed out'); + end; if (dwWait <> WAIT_OBJECT_0) or not GetOverlappedResult( FPipe, overlapped.Overlapped, cbRead, TRUE) @@ -477,7 +493,7 @@ begin ASSERT( cbRead > 0); // see TTransportImpl.ReadAll() ASSERT( cbRead <= DWORD(nRemaining)); Dec( nRemaining, cbRead); - Inc( offset, cbRead); + Inc( pData, cbRead); Inc( result, cbRead); end; end; @@ -864,8 +880,10 @@ begin CreateNamedPipe; while not FConnected do begin - if QueryStopServer - then Abort; + if QueryStopServer then begin + InternalClose; + Abort; + end; if Assigned(fnAccepting) then fnAccepting(); diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.pas b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.pas index 52b617bb9..ea25c4ba1 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.pas +++ 
b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Transport.pas @@ -966,10 +966,11 @@ function TBufferedStreamImpl.Read( const pBuf : Pointer; const buflen : Integer; var nRead : Integer; tempbuf : TBytes; + pTmp : PByte; begin inherited; Result := 0; - + if IsOpen then begin while count > 0 do begin @@ -984,8 +985,10 @@ begin end; if FReadBuffer.Position < FReadBuffer.Size then begin - nRead := Min( FReadBuffer.Size - FReadBuffer.Position, count); - Inc( Result, FReadBuffer.Read( PByteArray(pBuf)^[offset], nRead)); + nRead := Min( FReadBuffer.Size - FReadBuffer.Position, count); + pTmp := pBuf; + Inc( pTmp, offset); + Inc( Result, FReadBuffer.Read( pTmp^, nRead)); Dec( count, nRead); Inc( offset, nRead); end; @@ -1011,11 +1014,14 @@ begin end; procedure TBufferedStreamImpl.Write( const pBuf : Pointer; offset: Integer; count: Integer); +var pTmp : PByte; begin inherited; if count > 0 then begin if IsOpen then begin - FWriteBuffer.Write( PByteArray(pBuf)^[offset], count ); + pTmp := pBuf; + Inc( pTmp, offset); + FWriteBuffer.Write( pTmp^, count ); if FWriteBuffer.Size > FBufSize then begin Flush; end; @@ -1066,7 +1072,7 @@ end; function TStreamTransportImpl.GetOutputStream: IThriftStream; begin - Result := FInputStream; + Result := FOutputStream; end; procedure TStreamTransportImpl.Open; @@ -1254,12 +1260,16 @@ begin end; function TFramedTransportImpl.Read( const pBuf : Pointer; const buflen : Integer; off: Integer; len: Integer): Integer; +var pTmp : PByte; begin if len > (buflen-off) then len := buflen-off; + pTmp := pBuf; + Inc( pTmp, off); + if (FReadBuffer <> nil) and (len > 0) then begin - result := FReadBuffer.Read( PByteArray(pBuf)^[off], len); + result := FReadBuffer.Read( pTmp^, len); if result > 0 then begin Exit; end; @@ -1267,7 +1277,7 @@ begin ReadFrame; if len > 0 - then Result := FReadBuffer.Read( PByteArray(pBuf)^[off], len) + then Result := FReadBuffer.Read( pTmp^, len) else Result := 0; end; @@ -1294,9 +1304,14 @@ begin end; procedure 
TFramedTransportImpl.Write( const pBuf : Pointer; off, len : Integer); +var pTmp : PByte; begin - if len > 0 - then FWriteBuffer.Write( PByteArray(pBuf)^[off], len ); + if len > 0 then begin + pTmp := pBuf; + Inc( pTmp, off); + + FWriteBuffer.Write( pTmp^, len ); + end; end; { TFramedTransport.TFactory } @@ -1482,7 +1497,7 @@ var wfd : TWaitForData; wsaError, msecs : Integer; nBytes : Integer; - pDest : PByte; + pTmp : PByte; begin inherited; @@ -1491,11 +1506,12 @@ begin else msecs := DEFAULT_THRIFT_TIMEOUT; result := 0; - pDest := @(PByteArray(pBuf)^[offset]); + pTmp := pBuf; + Inc( pTmp, offset); while count > 0 do begin while TRUE do begin - wfd := WaitForData( msecs, pDest, count, wsaError, nBytes); + wfd := WaitForData( msecs, pTmp, count, wsaError, nBytes); case wfd of TWaitForData.wfd_Error : Exit; TWaitForData.wfd_HaveData : Break; @@ -1519,8 +1535,8 @@ begin msecs := Max( msecs, 200); ASSERT( nBytes <= count); - nBytes := FTcpClient.ReceiveBuf( pDest^, nBytes); - Inc( pDest, nBytes); + nBytes := FTcpClient.ReceiveBuf( pTmp^, nBytes); + Inc( pTmp, nBytes); Dec( count, nBytes); Inc( result, nBytes); end; @@ -1546,6 +1562,7 @@ procedure TTcpSocketStreamImpl.Write( const pBuf : Pointer; offset, count: Integ // old sockets version var bCanWrite, bError : Boolean; retval, wsaError : Integer; + pTmp : PByte; begin inherited; @@ -1566,7 +1583,9 @@ begin if bError or not bCanWrite then raise TTransportExceptionUnknown.Create('unknown error'); - FTcpClient.SendBuf( PByteArray(pBuf)^[offset], count); + pTmp := pBuf; + Inc( pTmp, offset); + FTcpClient.SendBuf( pTmp^, count); end; {$ELSE} @@ -1574,16 +1593,17 @@ end; function TTcpSocketStreamImpl.Read( const pBuf : Pointer; const buflen : Integer; offset: Integer; count: Integer): Integer; // new sockets version var nBytes : Integer; - pDest : PByte; + pTmp : PByte; begin inherited; result := 0; - pDest := @(PByteArray(pBuf)^[offset]); + pTmp := pBuf; + Inc( pTmp, offset); while count > 0 do begin - nBytes := 
FTcpClient.Read(pDest^, count); + nBytes := FTcpClient.Read( pTmp^, count); if nBytes = 0 then Exit; - Inc( pDest, nBytes); + Inc( pTmp, nBytes); Dec( count, nBytes); Inc( result, nBytes); end; @@ -1610,13 +1630,16 @@ end; procedure TTcpSocketStreamImpl.Write( const pBuf : Pointer; offset, count: Integer); // new sockets version +var pTmp : PByte; begin inherited; if not FTcpClient.IsOpen then raise TTransportExceptionNotOpen.Create('not open'); - FTcpClient.Write( PByteArray(pBuf)^[offset], count); + pTmp := pBuf; + Inc( pTmp, offset); + FTcpClient.Write( pTmp^, count); end; {$ENDIF} diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Utils.pas b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Utils.pas index 89d021155..7e57863b6 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Utils.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Utils.pas @@ -206,8 +206,12 @@ end; class function CharUtils.IsHighSurrogate( const c : Char) : Boolean; begin - {$IF CompilerVersion < 23.0} - result := Character.IsHighSurrogate( c); + {$IF CompilerVersion < 25.0} + {$IFDEF OLD_UNIT_NAMES} + result := Character.IsHighSurrogate(c); + {$ELSE} + result := System.Character.IsHighSurrogate(c); + {$ENDIF} {$ELSE} result := c.IsHighSurrogate(); {$IFEND} @@ -216,10 +220,14 @@ end; class function CharUtils.IsLowSurrogate( const c : Char) : Boolean; begin - {$IF CompilerVersion < 23.0} - result := Character.IsLowSurrogate( c); + {$IF CompilerVersion < 25.0} + {$IFDEF OLD_UNIT_NAMES} + result := Character.IsLowSurrogate(c); + {$ELSE} + result := System.Character.IsLowSurrogate(c); + {$ENDIF} {$ELSE} - result := c.IsLowSurrogate; + result := c.IsLowSurrogate(); {$IFEND} end; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.pas b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.pas index edf20da94..6eca3c9cb 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.pas +++ 
b/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.pas @@ -25,7 +25,7 @@ uses SysUtils, Thrift.Protocol; const - Version = '0.11.0'; + Version = '1.0.0-dev'; type TApplicationExceptionSpecializedClass = class of TApplicationExceptionSpecialized; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Console.pas b/vendor/git.apache.org/thrift.git/lib/delphi/test/ConsoleHelper.pas similarity index 96% rename from vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Console.pas rename to vendor/git.apache.org/thrift.git/lib/delphi/test/ConsoleHelper.pas index 1dbb30989..0a8ddcf10 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/src/Thrift.Console.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/ConsoleHelper.pas @@ -17,7 +17,7 @@ * under the License. *) -unit Thrift.Console; +unit ConsoleHelper; interface @@ -99,10 +99,9 @@ begin begin idx := FMemo.Count - 1; if idx < 0 then - begin - FMemo.Add( S ); - end; - FMemo[idx] := FMemo[idx] + S; + FMemo.Add( S ) + else + FMemo[idx] := FMemo[idx] + S; end; FLineBreak := bWriteLine; end; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/TestClient.pas b/vendor/git.apache.org/thrift.git/lib/delphi/test/TestClient.pas index 59b2a66c9..8c0108004 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/TestClient.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/TestClient.pas @@ -25,13 +25,19 @@ unit TestClient; {.$DEFINE PerfTest} // activate the performance test {$DEFINE Exceptions} // activate the exceptions test (or disable while debugging) +{$if CompilerVersion >= 28} +{$DEFINE SupportsAsync} +{$ifend} + interface uses Windows, SysUtils, Classes, Math, + {$IFDEF SupportsAsync} System.Threading, {$ENDIF} DateUtils, Generics.Collections, TestConstants, + ConsoleHelper, Thrift, Thrift.Protocol.Compact, Thrift.Protocol.JSON, @@ -41,8 +47,7 @@ uses Thrift.Stream, Thrift.Test, Thrift.Utils, - Thrift.Collections, - Thrift.Console; + Thrift.Collections; type 
TThreadConsole = class @@ -66,6 +71,13 @@ type ); TTestGroups = set of TTestGroup; + TTestSize = ( + Empty, // Edge case: the zero-length empty binary + Normal, // Fairly small array of usual size (256 bytes) + ByteArrayTest, // THRIFT-4454 Large writes/reads may cause range check errors in debug mode + PipeWriteLimit // THRIFT-4372 Pipe write operations across a network are limited to 65,535 bytes per write. + ); + private FTransport : ITransport; FProtocol : IProtocol; @@ -85,8 +97,12 @@ type function CalculateExitCode : Byte; procedure ClientTest; + {$IFDEF SupportsAsync} + procedure ClientAsyncTest; + {$ENDIF} + procedure JSONProtocolReadWriteTest; - function PrepareBinaryData( aRandomDist, aHuge : Boolean) : TBytes; + function PrepareBinaryData( aRandomDist : Boolean; aSize : TTestSize) : TBytes; {$IFDEF StressTest} procedure StressTest(const client : TThriftTest.Iface); {$ENDIF} @@ -177,6 +193,7 @@ end; class function TTestClient.Execute(const args: array of string) : Byte; var i : Integer; + threadExitCode : Byte; host : string; port : Integer; sPipeName : string; @@ -374,11 +391,13 @@ begin result := 0; for test := 0 to FNumThread - 1 do begin - result := result or threads[test].WaitFor; + threadExitCode := threads[test].WaitFor; + result := result or threadExitCode; end; - for test := 0 to FNumThread - 1 - do threads[test].Free; + for test := 0 to FNumThread - 1 do begin + threads[test].Free; + end; Console.Write('Total time: ' + IntToStr( MilliSecondsBetween(Now, dtStart))); @@ -455,6 +474,7 @@ var first_map : IThriftDictionary; second_map : IThriftDictionary; pair : TPair; + testsize : TTestSize; begin client := TThriftTest.TClient.Create( FProtocol); FTransport.Open; @@ -547,42 +567,18 @@ begin Expect( i64 = -34359738368, 'testI64(-34359738368) = ' + IntToStr( i64)); // random binary small - binOut := PrepareBinaryData( TRUE, FALSE); - Console.WriteLine('testBinary('+BytesToHex(binOut)+')'); - try - binIn := client.testBinary(binOut); - Expect( 
Length(binOut) = Length(binIn), 'testBinary(): length '+IntToStr(Length(binOut))+' = '+IntToStr(Length(binIn))); - i32 := Min( Length(binOut), Length(binIn)); - Expect( CompareMem( binOut, binIn, i32), 'testBinary('+BytesToHex(binOut)+') = '+BytesToHex(binIn)); - except - on e:TApplicationException do Console.WriteLine('testBinary(): '+e.Message); - on e:Exception do Expect( FALSE, 'testBinary(): Unexpected exception "'+e.ClassName+'": '+e.Message); - end; - - // random binary huge - binOut := PrepareBinaryData( TRUE, TRUE); - Console.WriteLine('testBinary('+BytesToHex(binOut)+')'); - try - binIn := client.testBinary(binOut); - Expect( Length(binOut) = Length(binIn), 'testBinary(): length '+IntToStr(Length(binOut))+' = '+IntToStr(Length(binIn))); - i32 := Min( Length(binOut), Length(binIn)); - Expect( CompareMem( binOut, binIn, i32), 'testBinary('+BytesToHex(binOut)+') = '+BytesToHex(binIn)); - except - on e:TApplicationException do Console.WriteLine('testBinary(): '+e.Message); - on e:Exception do Expect( FALSE, 'testBinary(): Unexpected exception "'+e.ClassName+'": '+e.Message); - end; - - // empty binary - SetLength( binOut, 0); - Console.WriteLine('testBinary('+BytesToHex(binOut)+')'); - try - binIn := client.testBinary(binOut); - Expect( Length(binOut) = Length(binIn), 'testBinary(): length '+IntToStr(Length(binOut))+' = '+IntToStr(Length(binIn))); - i32 := Min( Length(binOut), Length(binIn)); - Expect( CompareMem( binOut, binIn, i32), 'testBinary('+BytesToHex(binOut)+') = '+BytesToHex(binIn)); - except - on e:TApplicationException do Console.WriteLine('testBinary(): '+e.Message); - on e:Exception do Expect( FALSE, 'testBinary(): Unexpected exception "'+e.ClassName+'": '+e.Message); + for testsize := Low(TTestSize) to High(TTestSize) do begin + binOut := PrepareBinaryData( TRUE, testsize); + Console.WriteLine('testBinary('+BytesToHex(binOut)+')'); + try + binIn := client.testBinary(binOut); + Expect( Length(binOut) = Length(binIn), 'testBinary(): length 
'+IntToStr(Length(binOut))+' = '+IntToStr(Length(binIn))); + i32 := Min( Length(binOut), Length(binIn)); + Expect( CompareMem( binOut, binIn, i32), 'testBinary('+BytesToHex(binOut)+') = '+BytesToHex(binIn)); + except + on e:TApplicationException do Console.WriteLine('testBinary(): '+e.Message); + on e:Exception do Expect( FALSE, 'testBinary(): Unexpected exception "'+e.ClassName+'": '+e.Message); + end; end; Console.WriteLine('testDouble(5.325098235)'); @@ -1004,6 +1000,33 @@ begin end; +{$IFDEF SupportsAsync} +procedure TClientThread.ClientAsyncTest; +var + client : TThriftTest.IAsync; + s : string; + i8 : ShortInt; +begin + StartTestGroup( 'Async Tests', test_Unknown); + client := TThriftTest.TClient.Create( FProtocol); + FTransport.Open; + + // oneway void functions + client.testOnewayAsync(1).Wait; + Expect( TRUE, 'Test Oneway(1)'); // success := no exception + + // normal functions + s := client.testStringAsync(HUGE_TEST_STRING).Value; + Expect( length(s) = length(HUGE_TEST_STRING), + 'testString( length(HUGE_TEST_STRING) = '+IntToStr(Length(HUGE_TEST_STRING))+') ' + +'=> length(result) = '+IntToStr(Length(s))); + + i8 := client.testByte(1).Value; + Expect( i8 = 1, 'testByte(1) = ' + IntToStr( i8 )); +end; +{$ENDIF} + + {$IFDEF StressTest} procedure TClientThread.StressTest(const client : TThriftTest.Iface); begin @@ -1024,18 +1047,25 @@ end; {$ENDIF} -function TClientThread.PrepareBinaryData( aRandomDist, aHuge : Boolean) : TBytes; +function TClientThread.PrepareBinaryData( aRandomDist : Boolean; aSize : TTestSize) : TBytes; var i : Integer; begin - if aHuge - then SetLength( result, $12345) // tests for THRIFT-4372 - else SetLength( result, $100); + case aSize of + Empty : SetLength( result, 0); + Normal : SetLength( result, $100); + ByteArrayTest : SetLength( result, SizeOf(TByteArray) + 128); + PipeWriteLimit : SetLength( result, 65535 + 128); + else + raise EArgumentException.Create('aSize'); + end; + ASSERT( Low(result) = 0); + if Length(result) = 0 then 
Exit; // linear distribution, unless random is requested if not aRandomDist then begin for i := Low(result) to High(result) do begin - result[i] := i; + result[i] := i mod $100; end; Exit; end; @@ -1090,7 +1120,7 @@ begin StartTestGroup( 'JsonProtocolTest', test_Unknown); // prepare binary data - binary := PrepareBinaryData( FALSE, FALSE); + binary := PrepareBinaryData( FALSE, Normal); SetLength( emptyBinary, 0); // empty binary data block // output setup @@ -1303,12 +1333,15 @@ begin try {$IFDEF Win64} UseInterlockedExchangeAdd64; - {$ENDIF} + {$ENDIF} JSONProtocolReadWriteTest; for i := 0 to FNumIteration - 1 do begin ClientTest; + {$IFDEF SupportsAsync} + ClientAsyncTest; + {$ENDIF} end; except on e:Exception do Expect( FALSE, 'unexpected exception: "'+e.message+'"'); diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServer.pas b/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServer.pas index 97041d0b9..4400c342d 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServer.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServer.pas @@ -29,7 +29,6 @@ interface uses Windows, SysUtils, Generics.Collections, - Thrift.Console, Thrift.Server, Thrift.Transport, Thrift.Transport.Pipes, @@ -42,6 +41,7 @@ uses Thrift, TestConstants, TestServerEvents, + ConsoleHelper, Contnrs; type diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServerEvents.pas b/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServerEvents.pas index 2e776d21b..2208cd4ba 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServerEvents.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/TestServerEvents.pas @@ -27,7 +27,7 @@ uses Thrift.Protocol, Thrift.Transport, Thrift.Server, - Thrift.Console; + ConsoleHelper; type TRequestEventsImpl = class( TInterfacedObject, IRequestEvents) diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/client.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/client.dpr 
index f2e52505e..1791c18b8 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/client.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/client.dpr @@ -37,7 +37,6 @@ uses Thrift.Collections in '..\src\Thrift.Collections.pas', Thrift.Server in '..\src\Thrift.Server.pas', Thrift.Stream in '..\src\Thrift.Stream.pas', - Thrift.Console in '..\src\Thrift.Console.pas', Thrift.TypeRegistry in '..\src\Thrift.TypeRegistry.pas', Thrift.Utils in '..\src\Thrift.Utils.pas'; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Server.Main.pas b/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Server.Main.pas index 37f84bbfa..3860f5ace 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Server.Main.pas +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Server.Main.pas @@ -28,7 +28,6 @@ interface uses Windows, SysUtils, Generics.Collections, - Thrift.Console, Thrift.Server, Thrift.Transport, Thrift.Transport.Pipes, @@ -41,6 +40,7 @@ uses Benchmark, // in gen-delphi folder Aggr, // in gen-delphi folder Multiplex.Test.Common, + ConsoleHelper, Contnrs; type diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Client.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Client.dpr index d6f93a1d2..14d643137 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Client.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Client.dpr @@ -34,7 +34,6 @@ uses Thrift.Collections in '..\..\src\Thrift.Collections.pas', Thrift.Server in '..\..\src\Thrift.Server.pas', Thrift.Stream in '..\..\src\Thrift.Stream.pas', - Thrift.Console in '..\..\src\Thrift.Console.pas', Thrift.TypeRegistry in '..\..\src\Thrift.TypeRegistry.pas', Thrift.Utils in '..\..\src\Thrift.Utils.pas'; diff --git 
a/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Server.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Server.dpr index 555003604..0e51a9ca0 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Server.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/multiplexed/Multiplex.Test.Server.dpr @@ -24,6 +24,7 @@ program Multiplex.Test.Server; uses SysUtils, Multiplex.Server.Main in 'Multiplex.Server.Main.pas', + ConsoleHelper in '..\ConsoleHelper.pas', Thrift in '..\..\src\Thrift.pas', Thrift.Socket in '..\..\src\Thrift.Socket.pas', Thrift.Transport in '..\..\src\Thrift.Transport.pas', @@ -33,7 +34,6 @@ uses Thrift.Processor.Multiplex in '..\..\src\Thrift.Processor.Multiplex.pas', Thrift.Collections in '..\..\src\Thrift.Collections.pas', Thrift.Server in '..\..\src\Thrift.Server.pas', - Thrift.Console in '..\..\src\Thrift.Console.pas', Thrift.Utils in '..\..\src\Thrift.Utils.pas', Thrift.TypeRegistry in '..\..\src\Thrift.TypeRegistry.pas', Thrift.Stream in '..\..\src\Thrift.Stream.pas'; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/serializer/TestSerializer.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/serializer/TestSerializer.dpr index 14be502a4..a621b7e26 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/serializer/TestSerializer.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/serializer/TestSerializer.dpr @@ -30,7 +30,6 @@ uses Thrift.Protocol.JSON in '..\..\src\Thrift.Protocol.JSON.pas', Thrift.Collections in '..\..\src\Thrift.Collections.pas', Thrift.Server in '..\..\src\Thrift.Server.pas', - Thrift.Console in '..\..\src\Thrift.Console.pas', Thrift.Utils in '..\..\src\Thrift.Utils.pas', Thrift.Serializer in '..\..\src\Thrift.Serializer.pas', Thrift.Stream in '..\..\src\Thrift.Stream.pas', diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/server.dpr 
b/vendor/git.apache.org/thrift.git/lib/delphi/test/server.dpr index d87a3313e..9af854222 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/server.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/server.dpr @@ -37,7 +37,6 @@ uses Thrift.Processor.Multiplex in '..\src\Thrift.Processor.Multiplex.pas', Thrift.Collections in '..\src\Thrift.Collections.pas', Thrift.Server in '..\src\Thrift.Server.pas', - Thrift.Console in '..\src\Thrift.Console.pas', Thrift.TypeRegistry in '..\src\Thrift.TypeRegistry.pas', Thrift.Utils in '..\src\Thrift.Utils.pas', Thrift.Stream in '..\src\Thrift.Stream.pas'; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version1.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version1.dpr index 40d025fbf..219db6d01 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version1.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version1.dpr @@ -31,7 +31,6 @@ uses Thrift.Protocol.JSON in '..\..\src\Thrift.Protocol.JSON.pas', Thrift.Collections in '..\..\src\Thrift.Collections.pas', Thrift.Server in '..\..\src\Thrift.Server.pas', - Thrift.Console in '..\..\src\Thrift.Console.pas', Thrift.Utils in '..\..\src\Thrift.Utils.pas', Thrift.TypeRegistry in '..\..\src\Thrift.TypeRegistry.pas', Thrift.Stream in '..\..\src\Thrift.Stream.pas'; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version2.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version2.dpr index 9cb6ff613..e0c31117e 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version2.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/skip/skiptest_version2.dpr @@ -31,7 +31,6 @@ uses Thrift.Protocol.JSON in '..\..\src\Thrift.Protocol.JSON.pas', Thrift.Collections in '..\..\src\Thrift.Collections.pas', Thrift.Server in '..\..\src\Thrift.Server.pas', - Thrift.Console in '..\..\src\Thrift.Console.pas', Thrift.Utils 
in '..\..\src\Thrift.Utils.pas', Thrift.TypeRegistry in '..\..\src\Thrift.TypeRegistry.pas', Thrift.Stream in '..\..\src\Thrift.Stream.pas'; diff --git a/vendor/git.apache.org/thrift.git/lib/delphi/test/typeregistry/TestTypeRegistry.dpr b/vendor/git.apache.org/thrift.git/lib/delphi/test/typeregistry/TestTypeRegistry.dpr index 3a77aae98..3a1fd2bca 100644 --- a/vendor/git.apache.org/thrift.git/lib/delphi/test/typeregistry/TestTypeRegistry.dpr +++ b/vendor/git.apache.org/thrift.git/lib/delphi/test/typeregistry/TestTypeRegistry.dpr @@ -30,7 +30,6 @@ uses Thrift.Protocol.JSON in '..\..\src\Thrift.Protocol.JSON.pas', Thrift.Collections in '..\..\src\Thrift.Collections.pas', Thrift.Server in '..\..\src\Thrift.Server.pas', - Thrift.Console in '..\..\src\Thrift.Console.pas', Thrift.Utils in '..\..\src\Thrift.Utils.pas', Thrift.Serializer in '..\..\src\Thrift.Serializer.pas', Thrift.Stream in '..\..\src\Thrift.Stream.pas', diff --git a/vendor/git.apache.org/thrift.git/lib/erl/Makefile.am b/vendor/git.apache.org/thrift.git/lib/erl/Makefile.am index 8867f884e..06323b4a7 100644 --- a/vendor/git.apache.org/thrift.git/lib/erl/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/erl/Makefile.am @@ -23,6 +23,7 @@ THRIFT_FILES = $(wildcard test/*.thrift) \ $(THRIFT_OMIT_FILE) \ ../../test/ConstantsDemo.thrift \ ../../test/NameConflictTest.thrift \ + ../../test/DoubleConstantsTest.thrift \ ../../test/ThriftTest.thrift if ERLANG_OTP16 diff --git a/vendor/git.apache.org/thrift.git/lib/erl/test/test_rendered_double_constants.erl b/vendor/git.apache.org/thrift.git/lib/erl/test/test_rendered_double_constants.erl new file mode 100644 index 000000000..87fce8130 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/erl/test/test_rendered_double_constants.erl @@ -0,0 +1,68 @@ +%% +%% Licensed to the Apache Software Foundation (ASF) under one +%% or more contributor license agreements. 
See the NOTICE file +%% distributed with this work for additional information +%% regarding copyright ownership. The ASF licenses this file +%% to you under the Apache License, Version 2.0 (the +%% "License"); you may not use this file except in compliance +%% with the License. You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% + +-module(test_rendered_double_constants). + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + +-include("gen-erl/double_constants_test_constants.hrl"). + +-define(EPSILON, 0.0000001). + +rendered_double_constants_test() -> + ?assert(abs(1.0 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST) =< ?EPSILON), + ?assert(abs(-100.0 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST) =< ?EPSILON), + ?assert(abs(9223372036854775807.0 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST) =< ?EPSILON), + ?assert(abs(-9223372036854775807.0 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST) =< ?EPSILON), + ?assert(abs(3.14159265359 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST) =< ?EPSILON), + ?assert(abs(1000000.1 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST) =< ?EPSILON), + ?assert(abs(-1000000.1 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST) =< ?EPSILON), + ?assert(abs(1.7e+308 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST) =< ?EPSILON), + ?assert(abs(9223372036854775816.43 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST) =< ?EPSILON), + ?assert(abs(-1.7e+308 - 
?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST) =< ?EPSILON), + ?assert(abs(-9223372036854775816.43 - ?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST) =< ?EPSILON), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST)), + ?assert(is_float(?DOUBLE_CONSTANTS_TEST_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST)). 
+ +rendered_double_list_test() -> + ?assertEqual(12, length(?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)), + ?assert(abs(1.0 - lists:nth(1, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(-100.0 - lists:nth(2, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(100.0 - lists:nth(3, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(9223372036854775807.0 - lists:nth(4, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(-9223372036854775807.0 - lists:nth(5, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(3.14159265359 - lists:nth(6, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(1000000.1 - lists:nth(7, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(-1000000.1 - lists:nth(8, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(1.7e+308 - lists:nth(9, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(-1.7e+308 - lists:nth(10, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(9223372036854775816.43 - lists:nth(11, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON), + ?assert(abs(-9223372036854775816.43 - lists:nth(12, ?DOUBLE_CONSTANTS_TEST_DOUBLE_LIST_TEST)) =< ?EPSILON). + +-endif. 
%% TEST \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/go/Makefile.am b/vendor/git.apache.org/thrift.git/lib/go/Makefile.am index 0d5971014..0dfa5fadc 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/go/Makefile.am @@ -31,14 +31,12 @@ install: @echo '##############################################################' check-local: - GOPATH=`pwd` $(GO) get golang.org/x/net/context GOPATH=`pwd` $(GO) test -race ./thrift clean-local: $(RM) -rf pkg all-local: - GOPATH=`pwd` $(GO) get golang.org/x/net/context GOPATH=`pwd` $(GO) build ./thrift EXTRA_DIST = \ diff --git a/vendor/git.apache.org/thrift.git/lib/go/README.md b/vendor/git.apache.org/thrift.git/lib/go/README.md index 7440474c3..debc9ac68 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/README.md +++ b/vendor/git.apache.org/thrift.git/lib/go/README.md @@ -24,6 +24,8 @@ under the License. Using Thrift with Go ==================== +Thrift supports Go 1.7+ + In following Go conventions, we recommend you use the 'go' tool to install Thrift for go. diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/Makefile.am b/vendor/git.apache.org/thrift.git/lib/go/test/Makefile.am index 842f2de61..e93ec5caf 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/go/test/Makefile.am @@ -17,10 +17,6 @@ # under the License. 
# -if GOVERSION_LT_17 -COMPILER_EXTRAFLAG=",legacy_context" -endif - THRIFTARGS = -out gopath/src/ --gen go:thrift_import=thrift$(COMPILER_EXTRAFLAG) THRIFTTEST = $(top_srcdir)/test/ThriftTest.thrift @@ -59,7 +55,6 @@ gopath: $(THRIFT) $(THRIFTTEST) \ $(THRIFT) $(THRIFTARGS) InitialismsTest.thrift $(THRIFT) $(THRIFTARGS),read_write_private DontExportRWTest.thrift $(THRIFT) $(THRIFTARGS),ignore_initialisms IgnoreInitialismsTest.thrift - GOPATH=`pwd`/gopath $(GO) get golang.org/x/net/context GOPATH=`pwd`/gopath $(GO) get github.com/golang/mock/gomock || true sed -i 's/\"context\"/\"golang.org\/x\/net\/context\"/g' gopath/src/github.com/golang/mock/gomock/controller.go || true GOPATH=`pwd`/gopath $(GO) get github.com/golang/mock/gomock diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/dontexportrwtest/compile_test.go b/vendor/git.apache.org/thrift.git/lib/go/test/dontexportrwtest/compile_test.go index 2b877e3b0..cf6763e29 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/dontexportrwtest/compile_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/dontexportrwtest/compile_test.go @@ -20,7 +20,6 @@ package dontexportrwtest import ( - "fmt" "testing" ) @@ -29,10 +28,10 @@ import ( func TestReadWriteMethodsArePrivate(t *testing.T) { // This will only compile if read/write methods exist s := NewTestStruct() - fmt.Sprintf("%v", s.read) - fmt.Sprintf("%v", s.write) + _ = s.read + _ = s.write is := NewInnerStruct() - fmt.Sprintf("%v", is.read) - fmt.Sprintf("%v", is.write) + _ = is.read + _ = is.write } diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/client_error_test.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/client_error_test.go index 4a8ef1371..fdec4ea57 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/client_error_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/client_error_test.go @@ -20,6 +20,7 @@ package tests import ( + "context" "errors" "errortest" "testing" @@ -212,7 +213,7 @@ func 
prepareClientCallReply(protocol *MockTProtocol, failAt int, failWith error) if failAt == 25 { err = failWith } - last = protocol.EXPECT().Flush().Return(err).After(last) + last = protocol.EXPECT().Flush(context.Background()).Return(err).After(last) if failAt == 25 { return true } @@ -414,6 +415,7 @@ func TestClientReportTTransportErrors(t *testing.T) { client := errortest.NewErrorTestClient(thrift.NewTStandardClient(protocol, protocol)) _, retErr := client.TestStruct(defaultCtx, thing) mockCtrl.Finish() + mockCtrl = gomock.NewController(t) err2, ok := retErr.(thrift.TTransportException) if !ok { t.Fatal("Expected a TTrasportException") @@ -446,6 +448,7 @@ func TestClientReportTTransportErrorsLegacy(t *testing.T) { client := errortest.NewErrorTestClientProtocol(transport, protocol, protocol) _, retErr := client.TestStruct(defaultCtx, thing) mockCtrl.Finish() + mockCtrl = gomock.NewController(t) err2, ok := retErr.(thrift.TTransportException) if !ok { t.Fatal("Expected a TTrasportException") @@ -477,6 +480,7 @@ func TestClientReportTProtocolErrors(t *testing.T) { client := errortest.NewErrorTestClient(thrift.NewTStandardClient(protocol, protocol)) _, retErr := client.TestStruct(defaultCtx, thing) mockCtrl.Finish() + mockCtrl = gomock.NewController(t) err2, ok := retErr.(thrift.TProtocolException) if !ok { t.Fatal("Expected a TProtocolException") @@ -508,6 +512,7 @@ func TestClientReportTProtocolErrorsLegacy(t *testing.T) { client := errortest.NewErrorTestClientProtocol(transport, protocol, protocol) _, retErr := client.TestStruct(defaultCtx, thing) mockCtrl.Finish() + mockCtrl = gomock.NewController(t) err2, ok := retErr.(thrift.TProtocolException) if !ok { t.Fatal("Expected a TProtocolException") @@ -532,7 +537,7 @@ func prepareClientCallException(protocol *MockTProtocol, failAt int, failWith er last = protocol.EXPECT().WriteFieldStop().After(last) last = protocol.EXPECT().WriteStructEnd().After(last) last = protocol.EXPECT().WriteMessageEnd().After(last) - last = 
protocol.EXPECT().Flush().After(last) + last = protocol.EXPECT().Flush(context.Background()).After(last) // Reading the exception, might fail. if failAt == 0 { @@ -628,6 +633,7 @@ func TestClientCallException(t *testing.T) { client := errortest.NewErrorTestClient(thrift.NewTStandardClient(protocol, protocol)) _, retErr := client.TestString(defaultCtx, "test") mockCtrl.Finish() + mockCtrl = gomock.NewController(t) if !willComplete { err2, ok := retErr.(thrift.TTransportException) @@ -663,6 +669,7 @@ func TestClientCallExceptionLegacy(t *testing.T) { client := errortest.NewErrorTestClientProtocol(transport, protocol, protocol) _, retErr := client.TestString(defaultCtx, "test") mockCtrl.Finish() + mockCtrl = gomock.NewController(t) if !willComplete { err2, ok := retErr.(thrift.TTransportException) @@ -698,7 +705,7 @@ func TestClientSeqIdMismatch(t *testing.T) { protocol.EXPECT().WriteFieldStop(), protocol.EXPECT().WriteStructEnd(), protocol.EXPECT().WriteMessageEnd(), - protocol.EXPECT().Flush(), + protocol.EXPECT().Flush(context.Background()), protocol.EXPECT().ReadMessageBegin().Return("testString", thrift.REPLY, int32(2), nil), ) @@ -729,7 +736,7 @@ func TestClientSeqIdMismatchLegeacy(t *testing.T) { protocol.EXPECT().WriteFieldStop(), protocol.EXPECT().WriteStructEnd(), protocol.EXPECT().WriteMessageEnd(), - protocol.EXPECT().Flush(), + protocol.EXPECT().Flush(context.Background()), protocol.EXPECT().ReadMessageBegin().Return("testString", thrift.REPLY, int32(2), nil), ) @@ -758,7 +765,7 @@ func TestClientWrongMethodName(t *testing.T) { protocol.EXPECT().WriteFieldStop(), protocol.EXPECT().WriteStructEnd(), protocol.EXPECT().WriteMessageEnd(), - protocol.EXPECT().Flush(), + protocol.EXPECT().Flush(context.Background()), protocol.EXPECT().ReadMessageBegin().Return("unknown", thrift.REPLY, int32(1), nil), ) @@ -789,7 +796,7 @@ func TestClientWrongMethodNameLegacy(t *testing.T) { protocol.EXPECT().WriteFieldStop(), protocol.EXPECT().WriteStructEnd(), 
protocol.EXPECT().WriteMessageEnd(), - protocol.EXPECT().Flush(), + protocol.EXPECT().Flush(context.Background()), protocol.EXPECT().ReadMessageBegin().Return("unknown", thrift.REPLY, int32(1), nil), ) @@ -818,7 +825,7 @@ func TestClientWrongMessageType(t *testing.T) { protocol.EXPECT().WriteFieldStop(), protocol.EXPECT().WriteStructEnd(), protocol.EXPECT().WriteMessageEnd(), - protocol.EXPECT().Flush(), + protocol.EXPECT().Flush(context.Background()), protocol.EXPECT().ReadMessageBegin().Return("testString", thrift.INVALID_TMESSAGE_TYPE, int32(1), nil), ) @@ -849,7 +856,7 @@ func TestClientWrongMessageTypeLegacy(t *testing.T) { protocol.EXPECT().WriteFieldStop(), protocol.EXPECT().WriteStructEnd(), protocol.EXPECT().WriteMessageEnd(), - protocol.EXPECT().Flush(), + protocol.EXPECT().Flush(context.Background()), protocol.EXPECT().ReadMessageBegin().Return("testString", thrift.INVALID_TMESSAGE_TYPE, int32(1), nil), ) diff --git a/vendor/git.apache.org/thrift.git/tutorial/go/src/go17.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/context.go similarity index 94% rename from vendor/git.apache.org/thrift.git/tutorial/go/src/go17.go rename to vendor/git.apache.org/thrift.git/lib/go/test/tests/context.go index a6003a917..a93a82b8f 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/go/src/go17.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/context.go @@ -1,5 +1,3 @@ -// +build go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -19,8 +17,10 @@ * under the License. 
*/ -package main +package tests -import "context" +import ( + "context" +) var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/multiplexed_protocol_test.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/multiplexed_protocol_test.go index 0b5896b60..61ac62828 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/multiplexed_protocol_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/multiplexed_protocol_test.go @@ -20,6 +20,7 @@ package tests import ( + "context" "multiplexedprotocoltest" "net" "testing" @@ -36,6 +37,18 @@ func FindAvailableTCPServerPort() net.Addr { } } +type FirstImpl struct{} + +func (f *FirstImpl) ReturnOne(ctx context.Context) (r int64, err error) { + return 1, nil +} + +type SecondImpl struct{} + +func (s *SecondImpl) ReturnTwo(ctx context.Context) (r int64, err error) { + return 2, nil +} + func createTransport(addr net.Addr) (thrift.TTransport, error) { socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) transport := thrift.NewTFramedTransport(socket) diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/one_way_test.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/one_way_test.go index 8abd671e6..48d0bbe38 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/one_way_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/one_way_test.go @@ -20,6 +20,8 @@ package tests import ( + "context" + "fmt" "net" "onewaytest" "testing" @@ -36,6 +38,12 @@ func findPort() net.Addr { } } +type impl struct{} + +func (i *impl) Hi(ctx context.Context, in int64, s string) (err error) { fmt.Println("Hi!"); return } +func (i *impl) Emptyfunc(ctx context.Context) (err error) { return } +func (i *impl) EchoInt(ctx context.Context, param int64) (r int64, err error) { return param, nil } + const TIMEOUT = time.Second var addr net.Addr diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/protocol_mock.go 
b/vendor/git.apache.org/thrift.git/lib/go/test/tests/protocol_mock.go index 8476c8661..51d7a02ff 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/protocol_mock.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/protocol_mock.go @@ -23,6 +23,7 @@ package tests import ( + "context" thrift "thrift" gomock "github.com/golang/mock/gomock" @@ -49,13 +50,13 @@ func (_m *MockTProtocol) EXPECT() *_MockTProtocolRecorder { return _m.recorder } -func (_m *MockTProtocol) Flush() error { +func (_m *MockTProtocol) Flush(ctx context.Context) error { ret := _m.ctrl.Call(_m, "Flush") ret0, _ := ret[0].(error) return ret0 } -func (_mr *_MockTProtocolRecorder) Flush() *gomock.Call { +func (_mr *_MockTProtocolRecorder) Flush(ctx context.Context) *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "Flush") } diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/required_fields_test.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/required_fields_test.go index 205371223..287ef60c3 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/required_fields_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/required_fields_test.go @@ -40,6 +40,7 @@ func TestStructReadRequiredFields(t *testing.T) { err := testStruct.Read(protocol) mockCtrl.Finish() + mockCtrl = gomock.NewController(t) if err == nil { t.Fatal("Expected read to fail") } @@ -63,6 +64,7 @@ func TestStructReadRequiredFields(t *testing.T) { err = testStruct.Read(protocol) mockCtrl.Finish() + mockCtrl = gomock.NewController(t) if err == nil { t.Fatal("Expected read to fail") } diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler.go index 6542fac58..31b9ee23e 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler.go +++ b/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler.go @@ -1,5 +1,3 @@ -// +build !go1.7 - /* * 
Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -22,12 +20,11 @@ package tests import ( + "context" "errors" "thrift" "thrifttest" "time" - - "golang.org/x/net/context" ) type SecondServiceHandler struct { diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler_go17.go b/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler_go17.go deleted file mode 100644 index e022a3df1..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/thrifttest_handler_go17.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * 'License'); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package tests - -import ( - "context" - "errors" - "thrift" - "thrifttest" - "time" -) - -type SecondServiceHandler struct { -} - -func NewSecondServiceHandler() *SecondServiceHandler { - return &SecondServiceHandler{} -} - -func (p *SecondServiceHandler) BlahBlah(ctx context.Context) (err error) { - return nil -} - -func (p *SecondServiceHandler) SecondtestString(ctx context.Context, thing string) (r string, err error) { - return thing, nil -} - -type ThriftTestHandler struct { -} - -func NewThriftTestHandler() *ThriftTestHandler { - return &ThriftTestHandler{} -} - -func (p *ThriftTestHandler) TestVoid(ctx context.Context) (err error) { - return nil -} - -func (p *ThriftTestHandler) TestString(ctx context.Context, thing string) (r string, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestBool(ctx context.Context, thing bool) (r bool, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestByte(ctx context.Context, thing int8) (r int8, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestI32(ctx context.Context, thing int32) (r int32, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestI64(ctx context.Context, thing int64) (r int64, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestDouble(ctx context.Context, thing float64) (r float64, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestBinary(ctx context.Context, thing []byte) (r []byte, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestStruct(ctx context.Context, thing *thrifttest.Xtruct) (r *thrifttest.Xtruct, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestNest(ctx context.Context, thing *thrifttest.Xtruct2) (r *thrifttest.Xtruct2, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestMap(ctx context.Context, thing map[int32]int32) (r map[int32]int32, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) 
TestStringMap(ctx context.Context, thing map[string]string) (r map[string]string, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestSet(ctx context.Context, thing []int32) (r []int32, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestList(ctx context.Context, thing []int32) (r []int32, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestEnum(ctx context.Context, thing thrifttest.Numberz) (r thrifttest.Numberz, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestTypedef(ctx context.Context, thing thrifttest.UserId) (r thrifttest.UserId, err error) { - return thing, nil -} - -func (p *ThriftTestHandler) TestMapMap(ctx context.Context, hello int32) (r map[int32]map[int32]int32, err error) { - r = make(map[int32]map[int32]int32) - pos := make(map[int32]int32) - neg := make(map[int32]int32) - - for i := int32(1); i < 5; i++ { - pos[i] = i - neg[-i] = -i - } - r[4] = pos - r[-4] = neg - - return r, nil -} - -func (p *ThriftTestHandler) TestInsanity(ctx context.Context, argument *thrifttest.Insanity) (r map[thrifttest.UserId]map[thrifttest.Numberz]*thrifttest.Insanity, err error) { - hello := thrifttest.NewXtruct() - hello.StringThing = "Hello2" - hello.ByteThing = 2 - hello.I32Thing = 2 - hello.I64Thing = 2 - - goodbye := thrifttest.NewXtruct() - goodbye.StringThing = "Goodbye4" - goodbye.ByteThing = 4 - goodbye.I32Thing = 4 - goodbye.I64Thing = 4 - - crazy := thrifttest.NewInsanity() - crazy.UserMap = make(map[thrifttest.Numberz]thrifttest.UserId) - crazy.UserMap[thrifttest.Numberz_EIGHT] = 8 - crazy.UserMap[thrifttest.Numberz_FIVE] = 5 - crazy.Xtructs = []*thrifttest.Xtruct{goodbye, hello} - - first_map := make(map[thrifttest.Numberz]*thrifttest.Insanity) - second_map := make(map[thrifttest.Numberz]*thrifttest.Insanity) - - first_map[thrifttest.Numberz_TWO] = crazy - first_map[thrifttest.Numberz_THREE] = crazy - - looney := thrifttest.NewInsanity() - second_map[thrifttest.Numberz_SIX] = 
looney - - var insane = make(map[thrifttest.UserId]map[thrifttest.Numberz]*thrifttest.Insanity) - insane[1] = first_map - insane[2] = second_map - - return insane, nil -} - -func (p *ThriftTestHandler) TestMulti(ctx context.Context, arg0 int8, arg1 int32, arg2 int64, arg3 map[int16]string, arg4 thrifttest.Numberz, arg5 thrifttest.UserId) (r *thrifttest.Xtruct, err error) { - r = thrifttest.NewXtruct() - r.StringThing = "Hello2" - r.ByteThing = arg0 - r.I32Thing = arg1 - r.I64Thing = arg2 - return r, nil -} - -func (p *ThriftTestHandler) TestException(ctx context.Context, arg string) (err error) { - if arg == "Xception" { - x := thrifttest.NewXception() - x.ErrorCode = 1001 - x.Message = arg - return x - } else if arg == "TException" { - return thrift.TException(errors.New(arg)) - } else { - return nil - } -} - -func (p *ThriftTestHandler) TestMultiException(ctx context.Context, arg0 string, arg1 string) (r *thrifttest.Xtruct, err error) { - if arg0 == "Xception" { - x := thrifttest.NewXception() - x.ErrorCode = 1001 - x.Message = "This is an Xception" - return nil, x - } else if arg0 == "Xception2" { - x2 := thrifttest.NewXception2() - x2.ErrorCode = 2002 - x2.StructThing = thrifttest.NewXtruct() - x2.StructThing.StringThing = "This is an Xception2" - return nil, x2 - } - - res := thrifttest.NewXtruct() - res.StringThing = arg1 - return res, nil -} - -func (p *ThriftTestHandler) TestOneway(ctx context.Context, secondsToSleep int32) (err error) { - time.Sleep(time.Second * time.Duration(secondsToSleep)) - return nil -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception_test.go index b2687a6c8..77433575d 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception_test.go @@ -29,13 +29,13 @@ func TestTApplicationException(t *testing.T) { t.Fatalf("Expected 
empty string for exception but found '%s'", exc.Error()) } if exc.TypeId() != UNKNOWN_APPLICATION_EXCEPTION { - t.Fatalf("Expected type UNKNOWN for exception but found '%s'", exc.TypeId()) + t.Fatalf("Expected type UNKNOWN for exception but found '%v'", exc.TypeId()) } exc = NewTApplicationException(WRONG_METHOD_NAME, "junk_method") if exc.Error() != "junk_method" { t.Fatalf("Expected 'junk_method' for exception but found '%s'", exc.Error()) } if exc.TypeId() != WRONG_METHOD_NAME { - t.Fatalf("Expected type WRONG_METHOD_NAME for exception but found '%s'", exc.TypeId()) + t.Fatalf("Expected type WRONG_METHOD_NAME for exception but found '%v'", exc.TypeId()) } } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go index 690d34111..de0f6a7a5 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go @@ -21,6 +21,7 @@ package thrift import ( "bytes" + "context" "encoding/binary" "errors" "fmt" @@ -457,8 +458,8 @@ func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { return buf, NewTProtocolException(err) } -func (p *TBinaryProtocol) Flush() (err error) { - return NewTProtocolException(p.trans.Flush()) +func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) } func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go index b754f925d..96702061b 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go @@ -21,6 +21,7 @@ package thrift import ( "bufio" + "context" ) type TBufferedTransportFactory struct { @@ -78,12 +79,12 @@ func (p *TBufferedTransport) Write(b []byte) (int, 
error) { return n, err } -func (p *TBufferedTransport) Flush() error { +func (p *TBufferedTransport) Flush(ctx context.Context) error { if err := p.ReadWriter.Flush(); err != nil { p.ReadWriter.Writer.Reset(p.tp) return err } - return p.tp.Flush() + return p.tp.Flush(ctx) } func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go index 8bdb53d8d..28791ccd0 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go @@ -1,6 +1,13 @@ package thrift -import "fmt" +import ( + "context" + "fmt" +) + +type TClient interface { + Call(ctx context.Context, method string, args, result TStruct) error +} type TStandardClient struct { seqId int32 @@ -16,7 +23,7 @@ func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClien } } -func (p *TStandardClient) Send(oprot TProtocol, seqId int32, method string, args TStruct) error { +func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil { return err } @@ -26,7 +33,7 @@ func (p *TStandardClient) Send(oprot TProtocol, seqId int32, method string, args if err := oprot.WriteMessageEnd(); err != nil { return err } - return oprot.Flush() + return oprot.Flush(ctx) } func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error { @@ -61,11 +68,11 @@ func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, resu return iprot.ReadMessageEnd() } -func (p *TStandardClient) call(method string, args, result TStruct) error { +func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { p.seqId++ seqId := p.seqId - if err := p.Send(p.oprot, seqId, method, args); err != nil { + if err := p.Send(ctx, p.oprot, 
seqId, method, args); err != nil { return err } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/client_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/client_go17.go deleted file mode 100644 index 15c1c52ca..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/client_go17.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.7 - -package thrift - -import "context" - -type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) error -} - -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { - return p.call(method, args, result) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/client_pre_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/client_pre_go17.go deleted file mode 100644 index d2e99ef2a..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/client_pre_go17.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package thrift - -import "golang.org/x/net/context" - -type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) error -} - -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { - return p.call(method, args, result) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/common_test_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/common_test.go similarity index 98% rename from vendor/git.apache.org/thrift.git/lib/go/thrift/common_test_go17.go rename to vendor/git.apache.org/thrift.git/lib/go/thrift/common_test.go index 2c729a226..93597ff8a 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/common_test_go17.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/common_test.go @@ -1,5 +1,3 @@ -// +build go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go index 0bc5fddeb..66fbf5c33 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "encoding/binary" "fmt" "io" @@ -599,8 +600,8 @@ func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { return buf, NewTProtocolException(e) } -func (p *TCompactProtocol) Flush() (err error) { - return NewTProtocolException(p.trans.Flush()) +func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) } func (p *TCompactProtocol) Skip(fieldType TType) (err error) { @@ -806,7 +807,7 @@ func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { case COMPACT_STRUCT: return STRUCT, nil } - return STOP, TException(fmt.Errorf("don't know what type: %s", t&0x0f)) + return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f)) } // Given a TType value, find the appropriate TCompactProtocol.Types constant. diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go similarity index 98% rename from vendor/git.apache.org/thrift.git/lib/go/thrift/go17.go rename to vendor/git.apache.org/thrift.git/lib/go/thrift/context.go index e3b21c4b7..d15c1bcf8 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/go17.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go @@ -1,5 +1,3 @@ -// +build go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go index d37252cc6..57943e0f3 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "log" ) @@ -258,8 +259,8 @@ func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) return } -func (tdp *TDebugProtocol) Flush() (err error) { - err = tdp.Delegate.Flush() +func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { + err = tdp.Delegate.Flush(ctx) log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) return } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go index 60b124991..81fa65aaa 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go @@ -22,6 +22,7 @@ package thrift import ( "bufio" "bytes" + "context" "encoding/binary" "fmt" "io" @@ -135,7 +136,7 @@ func (p *TFramedTransport) WriteString(s string) (n int, err error) { return p.buf.WriteString(s) } -func (p *TFramedTransport) Flush() error { +func (p *TFramedTransport) Flush(ctx context.Context) error { size := p.buf.Len() buf := p.buffer[:4] binary.BigEndian.PutUint32(buf, uint32(size)) @@ -151,7 +152,7 @@ func (p *TFramedTransport) Flush() error { return NewTTransportExceptionFromError(err) } } - err = p.transport.Flush() + err = p.transport.Flush(ctx) return NewTTransportExceptionFromError(err) } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go index 33f2aa4b5..5c82bf538 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go @@ -21,6 +21,7 @@ package thrift import ( "bytes" + "context" "io" "io/ioutil" "net/http" @@ -181,7 +182,7 @@ func (p *THttpClient) WriteString(s string) (n int, err error) { return p.requestBuffer.WriteString(s) } -func (p *THttpClient) Flush() error { +func (p *THttpClient) Flush(ctx context.Context) error { // Close any previous response body to avoid leaking connections. p.closeResponse() @@ -190,6 +191,9 @@ func (p *THttpClient) Flush() error { return NewTTransportExceptionFromError(err) } req.Header = p.header + if ctx != nil { + req = req.WithContext(ctx) + } response, err := p.client.Do(req) if err != nil { return NewTTransportExceptionFromError(err) diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go index 601855b92..66f0f388a 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go @@ -26,6 +26,18 @@ import ( "strings" ) +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} + // gz transparently compresses the HTTP response if the client supports it. 
func gz(handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_go17.go deleted file mode 100644 index 1313ac225..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_go17.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "net/http" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return gz(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - }) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_pre_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_pre_go17.go deleted file mode 100644 index 13aa1c11d..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport_pre_go17.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return gz(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(context.Background(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - }) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go index b18be81c4..fea93bcef 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go @@ -21,6 +21,7 @@ package thrift import ( "bufio" + "context" "io" ) @@ -138,7 +139,7 @@ func (p *StreamTransport) Close() error { } // Flushes the underlying output stream if not null. 
-func (p *StreamTransport) Flush() error { +func (p *StreamTransport) Flush(ctx context.Context) error { if p.Writer == nil { return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go index 442fa9144..7be685d43 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "encoding/base64" "fmt" ) @@ -438,10 +439,10 @@ func (p *TJSONProtocol) ReadBinary() ([]byte, error) { return v, p.ParsePostValue() } -func (p *TJSONProtocol) Flush() (err error) { +func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { err = p.writer.Flush() if err == nil { - err = p.trans.Flush() + err = p.trans.Flush(ctx) } return NewTProtocolException(err) } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol_test.go index 7104ce3a0..59c4d64a2 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol_test.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -36,7 +37,7 @@ func TestWriteJSONProtocolBool(t *testing.T) { if e := p.WriteBool(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -68,7 +69,7 @@ func TestReadJSONProtocolBool(t *testing.T) { } else { trans.Write([]byte{'0'}) // not JSON_FALSE } - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadBool() if e != nil { @@ 
-94,7 +95,7 @@ func TestWriteJSONProtocolByte(t *testing.T) { if e := p.WriteByte(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -116,7 +117,7 @@ func TestReadJSONProtocolByte(t *testing.T) { trans := NewTMemoryBuffer() p := NewTJSONProtocol(trans) trans.WriteString(strconv.Itoa(int(value))) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadByte() if e != nil { @@ -141,7 +142,7 @@ func TestWriteJSONProtocolI16(t *testing.T) { if e := p.WriteI16(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -163,7 +164,7 @@ func TestReadJSONProtocolI16(t *testing.T) { trans := NewTMemoryBuffer() p := NewTJSONProtocol(trans) trans.WriteString(strconv.Itoa(int(value))) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI16() if e != nil { @@ -188,7 +189,7 @@ func TestWriteJSONProtocolI32(t *testing.T) { if e := p.WriteI32(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -210,7 +211,7 @@ func TestReadJSONProtocolI32(t *testing.T) { trans := NewTMemoryBuffer() p := NewTJSONProtocol(trans) trans.WriteString(strconv.Itoa(int(value))) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI32() if e != nil { @@ -235,7 +236,7 
@@ func TestWriteJSONProtocolI64(t *testing.T) { if e := p.WriteI64(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -257,7 +258,7 @@ func TestReadJSONProtocolI64(t *testing.T) { trans := NewTMemoryBuffer() p := NewTJSONProtocol(trans) trans.WriteString(strconv.FormatInt(value, 10)) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI64() if e != nil { @@ -282,7 +283,7 @@ func TestWriteJSONProtocolDouble(t *testing.T) { if e := p.WriteDouble(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -319,7 +320,7 @@ func TestReadJSONProtocolDouble(t *testing.T) { p := NewTJSONProtocol(trans) n := NewNumericFromDouble(value) trans.WriteString(n.String()) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadDouble() if e != nil { @@ -358,7 +359,7 @@ func TestWriteJSONProtocolString(t *testing.T) { if e := p.WriteString(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -380,7 +381,7 @@ func TestReadJSONProtocolString(t *testing.T) { trans := NewTMemoryBuffer() p := NewTJSONProtocol(trans) trans.WriteString(jsonQuote(value)) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadString() if e != nil { @@ -409,7 +410,7 @@ 
func TestWriteJSONProtocolBinary(t *testing.T) { if e := p.WriteBinary(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -441,7 +442,7 @@ func TestReadJSONProtocolBinary(t *testing.T) { trans := NewTMemoryBuffer() p := NewTJSONProtocol(trans) trans.WriteString(jsonQuote(b64String)) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadBinary() if e != nil { @@ -474,7 +475,7 @@ func TestWriteJSONProtocolList(t *testing.T) { } } p.WriteListEnd() - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) } str := trans.String() @@ -528,7 +529,7 @@ func TestWriteJSONProtocolSet(t *testing.T) { } } p.WriteSetEnd() - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) } str := trans.String() @@ -585,12 +586,12 @@ func TestWriteJSONProtocolMap(t *testing.T) { } } p.WriteMapEnd() - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) } str := trans.String() if str[0] != '[' || str[len(str)-1] != ']' { - t.Fatalf("Bad value for %s, wrote: %q, in go: %q", thetype, str, DOUBLE_VALUES) + t.Fatalf("Bad value for %s, wrote: %v, in go: %v", thetype, str, DOUBLE_VALUES) } expectedKeyType, expectedValueType, expectedSize, err := p.ReadMapBegin() if err != nil { diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go index 97a4edfa5..5936d2730 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go @@ -21,6 +21,7 @@ package thrift import ( "bytes" + "context" ) // Memory buffer-based implementation of the TTransport interface. @@ -70,7 +71,7 @@ func (p *TMemoryBuffer) Close() error { } // Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush() error { +func (p *TMemoryBuffer) Flush(ctx context.Context) error { return nil } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go index b7f4f8a1c..d028a30b3 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go @@ -19,6 +19,12 @@ package thrift +import ( + "context" + "fmt" + "strings" +) + /* TMultiplexedProtocol is a protocol-independent concrete decorator that allows a Thrift client to communicate with a multiplexing Thrift server, @@ -122,6 +128,31 @@ func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProces t.serviceProcessorMap[name] = processor } +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin() + if err != nil { + return false, err + } + if typeId != CALL && typeId != ONEWAY { + return false, fmt.Errorf("Unexpected message type %v", typeId) + } + //extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, fmt.Errorf("Service name not found in message name: %s. 
Did you forget to use a TMultiplexProtocol in your client?", name) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} + //Protocol that use stored message for ReadMessageBegin type storedMessageProtocol struct { TProtocol diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_go17.go deleted file mode 100644 index c71035e68..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_go17.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "fmt" - "strings" -) - -func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin() - if err != nil { - return false, err - } - if typeId != CALL && typeId != ONEWAY { - return false, fmt.Errorf("Unexpected message type %v", typeId) - } - //extract the service name - v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) - if len(v) != 2 { - if t.DefaultProcessor != nil { - smb := NewStoredMessageProtocol(in, name, typeId, seqid) - return t.DefaultProcessor.Process(ctx, smb, out) - } - return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) - } - actualProcessor, ok := t.serviceProcessorMap[v[0]] - if !ok { - return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) - } - smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) - return actualProcessor.Process(ctx, smb, out) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_pre_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_pre_go17.go deleted file mode 100644 index 5c27b3875..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol_pre_go17.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" -) - -func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin() - if err != nil { - return false, err - } - if typeId != CALL && typeId != ONEWAY { - return false, fmt.Errorf("Unexpected message type %v", typeId) - } - //extract the service name - v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) - if len(v) != 2 { - if t.DefaultProcessor != nil { - smb := NewStoredMessageProtocol(in, name, typeId, seqid) - return t.DefaultProcessor.Process(ctx, smb, out) - } - return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) - } - actualProcessor, ok := t.serviceProcessorMap[v[0]] - if !ok { - return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) - } - smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) - return actualProcessor.Process(ctx, smb, out) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/pre_go17.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/pre_go17.go deleted file mode 100644 index cb564b8db..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/pre_go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "golang.org/x/net/context" - -var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go index 9d645df24..e4b132b30 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go @@ -19,6 +19,18 @@ package thrift +import "context" + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. +type TProcessor interface { + Process(ctx context.Context, in, out TProtocol) (bool, TException) +} + +type TProcessorFunction interface { + Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + // The default processor factory just returns a singleton // instance. 
type TProcessorFactory interface { diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go index 25e6d24b9..615b7a4a8 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "errors" "fmt" ) @@ -74,7 +75,7 @@ type TProtocol interface { ReadBinary() (value []byte, err error) Skip(fieldType TType) (err error) - Flush() (err error) + Flush(ctx context.Context) (err error) Transport() TTransport } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_test.go index 2573312d1..e9118da88 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_test.go @@ -21,6 +21,7 @@ package thrift import ( "bytes" + "context" "io/ioutil" "math" "net" @@ -227,17 +228,17 @@ func ReadWriteBool(t testing.TB, p TProtocol, trans TTransport) { for k, v := range BOOL_VALUES { err = p.WriteBool(v) if err != nil { - t.Errorf("%s: %T %T %q Error writing bool in list at index %d: %q", "ReadWriteBool", p, trans, err, k, v) + t.Errorf("%s: %T %T %v Error writing bool in list at index %v: %v", "ReadWriteBool", p, trans, err, k, v) } } p.WriteListEnd() if err != nil { - t.Errorf("%s: %T %T %q Error writing list end: %q", "ReadWriteBool", p, trans, err, BOOL_VALUES) + t.Errorf("%s: %T %T %v Error writing list end: %v", "ReadWriteBool", p, trans, err, BOOL_VALUES) } - p.Flush() + p.Flush(context.Background()) thetype2, thelen2, err := p.ReadListBegin() if err != nil { - t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteBool", p, trans, err, BOOL_VALUES) + t.Errorf("%s: %T %T %v Error reading list: %v", "ReadWriteBool", p, trans, err, BOOL_VALUES) } _, ok := p.(*TSimpleJSONProtocol) if !ok { @@ -245,16 +246,16 @@ func 
ReadWriteBool(t testing.TB, p TProtocol, trans TTransport) { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteBool", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteBool", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteBool", p, trans, thelen, thelen2) } } for k, v := range BOOL_VALUES { value, err := p.ReadBool() if err != nil { - t.Errorf("%s: %T %T %q Error reading bool at index %d: %q", "ReadWriteBool", p, trans, err, k, v) + t.Errorf("%s: %T %T %v Error reading bool at index %v: %v", "ReadWriteBool", p, trans, err, k, v) } if v != value { - t.Errorf("%s: index %d %q %q %q != %q", "ReadWriteBool", k, p, trans, v, value) + t.Errorf("%s: index %v %v %v %v != %v", "ReadWriteBool", k, p, trans, v, value) } } err = p.ReadListEnd() @@ -280,7 +281,7 @@ func ReadWriteByte(t testing.TB, p TProtocol, trans TTransport) { if err != nil { t.Errorf("%s: %T %T %q Error writing list end: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES) } - err = p.Flush() + err = p.Flush(context.Background()) if err != nil { t.Errorf("%s: %T %T %q Error flushing list of bytes: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES) } @@ -294,7 +295,7 @@ func ReadWriteByte(t testing.TB, p TProtocol, trans TTransport) { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteByte", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteByte", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteByte", p, trans, thelen, thelen2) } } for k, v := range BYTE_VALUES { @@ -320,7 +321,7 @@ func ReadWriteI16(t testing.TB, p TProtocol, trans TTransport) { p.WriteI16(v) } p.WriteListEnd() - p.Flush() + p.Flush(context.Background()) thetype2, thelen2, err := p.ReadListBegin() if err != nil { t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI16", p, trans, err, INT16_VALUES) @@ -331,7 +332,7 @@ func ReadWriteI16(t testing.TB, p TProtocol, trans 
TTransport) { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI16", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteI16", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteI16", p, trans, thelen, thelen2) } } for k, v := range INT16_VALUES { @@ -357,7 +358,7 @@ func ReadWriteI32(t testing.TB, p TProtocol, trans TTransport) { p.WriteI32(v) } p.WriteListEnd() - p.Flush() + p.Flush(context.Background()) thetype2, thelen2, err := p.ReadListBegin() if err != nil { t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI32", p, trans, err, INT32_VALUES) @@ -368,7 +369,7 @@ func ReadWriteI32(t testing.TB, p TProtocol, trans TTransport) { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI32", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteI32", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteI32", p, trans, thelen, thelen2) } } for k, v := range INT32_VALUES { @@ -393,7 +394,7 @@ func ReadWriteI64(t testing.TB, p TProtocol, trans TTransport) { p.WriteI64(v) } p.WriteListEnd() - p.Flush() + p.Flush(context.Background()) thetype2, thelen2, err := p.ReadListBegin() if err != nil { t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI64", p, trans, err, INT64_VALUES) @@ -404,7 +405,7 @@ func ReadWriteI64(t testing.TB, p TProtocol, trans TTransport) { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI64", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteI64", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteI64", p, trans, thelen, thelen2) } } for k, v := range INT64_VALUES { @@ -429,28 +430,28 @@ func ReadWriteDouble(t testing.TB, p TProtocol, trans TTransport) { p.WriteDouble(v) } p.WriteListEnd() - p.Flush() + p.Flush(context.Background()) thetype2, thelen2, err := p.ReadListBegin() if err != nil { - 
t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteDouble", p, trans, err, DOUBLE_VALUES) + t.Errorf("%s: %T %T %v Error reading list: %v", "ReadWriteDouble", p, trans, err, DOUBLE_VALUES) } if thetype != thetype2 { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteDouble", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteDouble", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteDouble", p, trans, thelen, thelen2) } for k, v := range DOUBLE_VALUES { value, err := p.ReadDouble() if err != nil { - t.Errorf("%s: %T %T %q Error reading double at index %d: %q", "ReadWriteDouble", p, trans, err, k, v) + t.Errorf("%s: %T %T %q Error reading double at index %d: %v", "ReadWriteDouble", p, trans, err, k, v) } if math.IsNaN(v) { if !math.IsNaN(value) { - t.Errorf("%s: %T %T math.IsNaN(%q) != math.IsNaN(%q)", "ReadWriteDouble", p, trans, v, value) + t.Errorf("%s: %T %T math.IsNaN(%v) != math.IsNaN(%v)", "ReadWriteDouble", p, trans, v, value) } } else if v != value { - t.Errorf("%s: %T %T %v != %q", "ReadWriteDouble", p, trans, v, value) + t.Errorf("%s: %T %T %v != %v", "ReadWriteDouble", p, trans, v, value) } } err = p.ReadListEnd() @@ -467,7 +468,7 @@ func ReadWriteString(t testing.TB, p TProtocol, trans TTransport) { p.WriteString(v) } p.WriteListEnd() - p.Flush() + p.Flush(context.Background()) thetype2, thelen2, err := p.ReadListBegin() if err != nil { t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteString", p, trans, err, STRING_VALUES) @@ -478,7 +479,7 @@ func ReadWriteString(t testing.TB, p TProtocol, trans TTransport) { t.Errorf("%s: %T %T type %s != type %s", "ReadWriteString", p, trans, thetype, thetype2) } if thelen != thelen2 { - t.Errorf("%s: %T %T len %s != len %s", "ReadWriteString", p, trans, thelen, thelen2) + t.Errorf("%s: %T %T len %v != len %v", "ReadWriteString", p, trans, thelen, thelen2) } } for k, v := range STRING_VALUES { @@ -487,7 +488,7 @@ func 
ReadWriteString(t testing.TB, p TProtocol, trans TTransport) { t.Errorf("%s: %T %T %q Error reading string at index %d: %q", "ReadWriteString", p, trans, err, k, v) } if v != value { - t.Errorf("%s: %T %T %d != %d", "ReadWriteString", p, trans, v, value) + t.Errorf("%s: %T %T %v != %v", "ReadWriteString", p, trans, v, value) } } if err != nil { @@ -498,7 +499,7 @@ func ReadWriteString(t testing.TB, p TProtocol, trans TTransport) { func ReadWriteBinary(t testing.TB, p TProtocol, trans TTransport) { v := protocol_bdata p.WriteBinary(v) - p.Flush() + p.Flush(context.Background()) value, err := p.ReadBinary() if err != nil { t.Errorf("%s: %T %T Unable to read binary: %s", "ReadWriteBinary", p, trans, err.Error()) diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go index 771222999..1ff4d3754 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go @@ -19,6 +19,10 @@ package thrift +import ( + "context" +) + type TSerializer struct { Transport *TMemoryBuffer Protocol TProtocol @@ -38,35 +42,35 @@ func NewTSerializer() *TSerializer { protocol} } -func (t *TSerializer) WriteString(msg TStruct) (s string, err error) { +func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { t.Transport.Reset() if err = msg.Write(t.Protocol); err != nil { return } - if err = t.Protocol.Flush(); err != nil { + if err = t.Protocol.Flush(ctx); err != nil { return } - if err = t.Transport.Flush(); err != nil { + if err = t.Transport.Flush(ctx); err != nil { return } return t.Transport.String(), nil } -func (t *TSerializer) Write(msg TStruct) (b []byte, err error) { +func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { t.Transport.Reset() if err = msg.Write(t.Protocol); err != nil { return } - if err = t.Protocol.Flush(); err != nil { + if err = 
t.Protocol.Flush(ctx); err != nil { return } - if err = t.Transport.Flush(); err != nil { + if err = t.Transport.Flush(ctx); err != nil { return } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_test.go index 06d27a16b..32227ef49 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_test.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "errors" "fmt" "testing" @@ -88,7 +89,7 @@ func ProtocolTest1(test *testing.T, pf ProtocolFactory) (bool, error) { m.StringSet = make(map[string]struct{}, 5) m.E = 2 - s, err := t.WriteString(&m) + s, err := t.WriteString(context.Background(), &m) if err != nil { return false, errors.New(fmt.Sprintf("Unable to Serialize struct\n\t %s", err)) } @@ -122,7 +123,7 @@ func ProtocolTest2(test *testing.T, pf ProtocolFactory) (bool, error) { m.StringSet = make(map[string]struct{}, 5) m.E = 2 - s, err := t.WriteString(&m) + s, err := t.WriteString(context.Background(), &m) if err != nil { return false, errors.New(fmt.Sprintf("Unable to Serialize struct\n\t %s", err)) diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_types_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_types_test.go index e6f818083..c8e3b3be4 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_types_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer_types_test.go @@ -19,7 +19,7 @@ package thrift -// Autogenerated by Thrift Compiler (0.11.0) +// Autogenerated by Thrift Compiler (1.0.0-dev) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING /* THE FOLLOWING THRIFT FILE WAS USED TO CREATE THIS diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go index 735332231..2e8a71112 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go @@ -22,6 +22,7 @@ package thrift import ( "bufio" "bytes" + "context" "encoding/base64" "encoding/json" "fmt" @@ -552,7 +553,7 @@ func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { return v, p.ParsePostValue() } -func (p *TSimpleJSONProtocol) Flush() (err error) { +func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { return NewTProtocolException(p.writer.Flush()) } @@ -1064,7 +1065,7 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error { for _, char := range line { switch char { default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"", line, "\"") + e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) return NewTProtocolExceptionWithType(INVALID_DATA, e) case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): break diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol_test.go index 8f0dcc9df..7b98082a4 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol_test.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -37,7 +38,7 @@ func TestWriteSimpleJSONProtocolBool(t *testing.T) { if e := p.WriteBool(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -63,7 +64,7 @@ func TestReadSimpleJSONProtocolBool(t *testing.T) { } else { trans.Write(JSON_FALSE) } - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadBool() if e != nil { 
@@ -88,7 +89,7 @@ func TestWriteSimpleJSONProtocolByte(t *testing.T) { if e := p.WriteByte(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -110,7 +111,7 @@ func TestReadSimpleJSONProtocolByte(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(strconv.Itoa(int(value))) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadByte() if e != nil { @@ -135,7 +136,7 @@ func TestWriteSimpleJSONProtocolI16(t *testing.T) { if e := p.WriteI16(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -157,7 +158,7 @@ func TestReadSimpleJSONProtocolI16(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(strconv.Itoa(int(value))) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI16() if e != nil { @@ -182,7 +183,7 @@ func TestWriteSimpleJSONProtocolI32(t *testing.T) { if e := p.WriteI32(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -204,7 +205,7 @@ func TestReadSimpleJSONProtocolI32(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(strconv.Itoa(int(value))) - trans.Flush() + trans.Flush(context.Background()) s := 
trans.String() v, e := p.ReadI32() if e != nil { @@ -228,7 +229,7 @@ func TestReadSimpleJSONProtocolI32Null(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(value) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI32() @@ -250,7 +251,7 @@ func TestWriteSimpleJSONProtocolI64(t *testing.T) { if e := p.WriteI64(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -272,7 +273,7 @@ func TestReadSimpleJSONProtocolI64(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(strconv.FormatInt(value, 10)) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI64() if e != nil { @@ -296,7 +297,7 @@ func TestReadSimpleJSONProtocolI64Null(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(value) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadI64() @@ -318,7 +319,7 @@ func TestWriteSimpleJSONProtocolDouble(t *testing.T) { if e := p.WriteDouble(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -355,7 +356,7 @@ func TestReadSimpleJSONProtocolDouble(t *testing.T) { p := NewTSimpleJSONProtocol(trans) n := NewNumericFromDouble(value) trans.WriteString(n.String()) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadDouble() if e != nil { @@ -394,7 +395,7 @@ func TestWriteSimpleJSONProtocolString(t *testing.T) { 
if e := p.WriteString(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -416,7 +417,7 @@ func TestReadSimpleJSONProtocolString(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(jsonQuote(value)) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadString() if e != nil { @@ -440,7 +441,7 @@ func TestReadSimpleJSONProtocolStringNull(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(value) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadString() if e != nil { @@ -464,7 +465,7 @@ func TestWriteSimpleJSONProtocolBinary(t *testing.T) { if e := p.WriteBinary(value); e != nil { t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) } - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) } s := trans.String() @@ -487,7 +488,7 @@ func TestReadSimpleJSONProtocolBinary(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(jsonQuote(b64String)) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() v, e := p.ReadBinary() if e != nil { @@ -516,7 +517,7 @@ func TestReadSimpleJSONProtocolBinaryNull(t *testing.T) { trans := NewTMemoryBuffer() p := NewTSimpleJSONProtocol(trans) trans.WriteString(value) - trans.Flush() + trans.Flush(context.Background()) s := trans.String() b, e := p.ReadBinary() v := string(b) @@ -542,7 +543,7 @@ func TestWriteSimpleJSONProtocolList(t *testing.T) { } } p.WriteListEnd() - if e := p.Flush(); e != nil { + if e := 
p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) } str := trans.String() @@ -596,7 +597,7 @@ func TestWriteSimpleJSONProtocolSet(t *testing.T) { } } p.WriteSetEnd() - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) } str := trans.String() @@ -653,12 +654,12 @@ func TestWriteSimpleJSONProtocolMap(t *testing.T) { } } p.WriteMapEnd() - if e := p.Flush(); e != nil { + if e := p.Flush(context.Background()); e != nil { t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) } str := trans.String() if str[0] != '[' || str[len(str)-1] != ']' { - t.Fatalf("Bad value for %s, wrote: %q, in go: %q", thetype, str, DOUBLE_VALUES) + t.Fatalf("Bad value for %s, wrote: %v, in go: %v", thetype, str, DOUBLE_VALUES) } l := strings.Split(str[1:len(str)-1], ",") if len(l) < 3 { diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go index 383b1fe3e..885427965 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "net" "time" ) @@ -148,7 +149,7 @@ func (p *TSocket) Write(buf []byte) (int, error) { return p.conn.Write(buf) } -func (p *TSocket) Flush() error { +func (p *TSocket) Flush(ctx context.Context) error { return nil } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go index c3bd72cc4..ba6337726 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "crypto/tls" "net" "time" @@ -158,7 +159,7 @@ func (p *TSSLSocket) Write(buf []byte) (int, error) { return 
p.conn.Write(buf) } -func (p *TSSLSocket) Flush() error { +func (p *TSSLSocket) Flush(ctx context.Context) error { return nil } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go index 70a85a848..ba2738a8d 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "errors" "io" ) @@ -30,6 +31,10 @@ type Flusher interface { Flush() (err error) } +type ContextFlusher interface { + Flush(ctx context.Context) (err error) +} + type ReadSizeProvider interface { RemainingBytes() (num_bytes uint64) } @@ -37,7 +42,7 @@ type ReadSizeProvider interface { // Encapsulates the I/O layer type TTransport interface { io.ReadWriteCloser - Flusher + ContextFlusher ReadSizeProvider // Opens the transport for communication @@ -60,6 +65,6 @@ type TRichTransport interface { io.ByteReader io.ByteWriter stringWriter - Flusher + ContextFlusher ReadSizeProvider } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_test.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_test.go index 864958a9d..01278038e 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_test.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_test.go @@ -20,6 +20,7 @@ package thrift import ( + "context" "io" "net" "strconv" @@ -54,7 +55,7 @@ func TransportTest(t *testing.T, writeTrans TTransport, readTrans TTransport) { if err != nil { t.Fatalf("Transport %T cannot write binary data of length %d: %s", writeTrans, len(transport_bdata), err) } - err = writeTrans.Flush() + err = writeTrans.Flush(context.Background()) if err != nil { t.Fatalf("Transport %T cannot flush write of binary data: %s", writeTrans, err) } @@ -74,7 +75,7 @@ func TransportTest(t *testing.T, writeTrans TTransport, readTrans TTransport) { if err != nil { t.Fatalf("Transport %T 
cannot write binary data 2 of length %d: %s", writeTrans, len(transport_bdata), err) } - err = writeTrans.Flush() + err = writeTrans.Flush(context.Background()) if err != nil { t.Fatalf("Transport %T cannot flush write binary data 2: %s", writeTrans, err) } @@ -113,7 +114,7 @@ func TransportHeaderTest(t *testing.T, writeTrans TTransport, readTrans TTranspo if err != nil { t.Fatalf("Transport %T cannot write binary data of length %d: %s", writeTrans, len(transport_bdata), err) } - err = writeTrans.Flush() + err = writeTrans.Flush(context.Background()) if err != nil { t.Fatalf("Transport %T cannot flush write of binary data: %s", writeTrans, err) } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go index f2f073222..f3d42673a 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go @@ -21,6 +21,7 @@ package thrift import ( "compress/zlib" + "context" "io" "log" ) @@ -91,11 +92,11 @@ func (z *TZlibTransport) Close() error { } // Flush flushes the writer and its underlying transport. -func (z *TZlibTransport) Flush() error { +func (z *TZlibTransport) Flush(ctx context.Context) error { if err := z.writer.Flush(); err != nil { return err } - return z.transport.Flush() + return z.transport.Flush(ctx) } // IsOpen returns true if the transport is open diff --git a/vendor/git.apache.org/thrift.git/lib/haxe/README.md b/vendor/git.apache.org/thrift.git/lib/haxe/README.md index ac70f253a..19bbbaabf 100644 --- a/vendor/git.apache.org/thrift.git/lib/haxe/README.md +++ b/vendor/git.apache.org/thrift.git/lib/haxe/README.md @@ -69,10 +69,10 @@ Thrift Haxe bindings Thrift Haxe bindings can be set up via the `haxelib` tool either from the official ASF repo, or via the github mirror. -- To set up any **stable version**, choose the appropriate branch (e.g. 
`0.11.0`): +- To set up any **stable version**, choose the appropriate branch (e.g. `0.10.0`): - - `haxelib git thrift https://git.apache.org/thrift.git 0.11.0 lib/haxe` - - `haxelib git thrift https://github.com/apache/thrift.git 0.11.0 lib/haxe` + - `haxelib git thrift https://git.apache.org/thrift.git 0.10.0 lib/haxe` + - `haxelib git thrift https://github.com/apache/thrift.git 0.10.0 lib/haxe` - To set up the current **development version**, use the `master` branch: diff --git a/vendor/git.apache.org/thrift.git/lib/haxe/haxelib.json b/vendor/git.apache.org/thrift.git/lib/haxe/haxelib.json index 26caa7967..f76990e8b 100644 --- a/vendor/git.apache.org/thrift.git/lib/haxe/haxelib.json +++ b/vendor/git.apache.org/thrift.git/lib/haxe/haxelib.json @@ -4,7 +4,7 @@ "license": "Apache", "tags": ["thrift", "rpc", "serialization", "cross", "framework"], "description": "Haxe bindings for the Apache Thrift RPC and serialization framework", - "version": "0.11.0", + "version": "1.0.0-dev", "releasenote": "Licensed under Apache License, Version 2.0. The Apache Thrift compiler needs to be installed separately.", "contributors": ["Apache Software Foundation (ASF)"], "dependencies": { }, diff --git a/vendor/git.apache.org/thrift.git/lib/hs/thrift.cabal b/vendor/git.apache.org/thrift.git/lib/hs/thrift.cabal index 583067953..4e9cb189c 100644 --- a/vendor/git.apache.org/thrift.git/lib/hs/thrift.cabal +++ b/vendor/git.apache.org/thrift.git/lib/hs/thrift.cabal @@ -18,7 +18,7 @@ -- Name: thrift -Version: 0.11.0 +Version: 1.0.0-dev Cabal-Version: >= 1.8 License: OtherLicense Category: Foreign diff --git a/vendor/git.apache.org/thrift.git/lib/java/CMakeLists.txt b/vendor/git.apache.org/thrift.git/lib/java/CMakeLists.txt index f7a1a63aa..46064e600 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/CMakeLists.txt +++ b/vendor/git.apache.org/thrift.git/lib/java/CMakeLists.txt @@ -17,19 +17,14 @@ # under the License. 
# -file(GLOB java_sources src/**/*.java) - if(ANDROID) - set(android_sources - android/build.gradle - android/settings.gradle - android/src/main/AndroidManifest.xml - ) set(THRIFT_AAR outputs/aar/thrift-debug.aar outputs/aar/thrift-release.aar) add_custom_command( OUTPUT ${THRIFT_AAR} - COMMAND ${GRADLE_EXECUTABLE} -p"${CMAKE_CURRENT_SOURCE_DIR}/android" -PbuildDir="${CMAKE_CURRENT_BINARY_DIR}" assemble - DEPENDS ${java_sources} ${android_sources}) + COMMAND ${GRADLE_EXECUTABLE} + -p "${CMAKE_CURRENT_SOURCE_DIR}/android" + "-PbuildDir=${CMAKE_CURRENT_BINARY_DIR}/android/build" assemble + ) add_custom_target(thrift_aar ALL DEPENDS ${THRIFT_AAR}) else(ANDROID) @@ -46,35 +41,47 @@ else(ANDROID) set(JAVA_DOC_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/${DOC_INSTALL_DIR}/java") endif() - file(GLOB_RECURSE ThriftJava_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/*.java) - set(ThriftJava_ARTIFACTS - libthrift-${thrift_VERSION}.jar - libthrift-${thrift_VERSION}.pom - ) - add_custom_command( - OUTPUT ${ThriftJava_ARTIFACTS} - COMMAND ${Ant_EXECUTABLE} ${ANT_FLAGS} -Dbuild.dir="${CMAKE_CURRENT_BINARY_DIR}" -f build.xml - MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/build.xml - DEPENDS ${ThriftJava_SOURCES} - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - ) - add_custom_target(ThriftJava ALL - COMMENT "Building Java library using Ant" - DEPENDS ${ThriftJava_ARTIFACTS} + COMMENT "Building Java library using Gradle Wrapper" + COMMAND ${GRADLEW_EXECUTABLE} ${GRADLE_OPTS} assemble + --console=plain --no-daemon + -Prelease=true + -Pthrift.version=${thrift_VERSION} + "-Pbuild.dir=${CMAKE_CURRENT_BINARY_DIR}/build" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) - # Hook the ant install task into CMake install - install(CODE "execute_process( - COMMAND ${Ant_EXECUTABLE} ${ANT_FLAGS} install - -Dbuild.dir=\"${CMAKE_CURRENT_BINARY_DIR}\" - -Dinstall.path=\"${JAVA_INSTALL_DIR}\" -Dinstall.javadoc.path=\"${JAVA_DOC_INSTALL_DIR}\" -f build.xml + # Enable publishing from CMake if the publishing 
information is provided + add_custom_target(MavenPublish + COMMENT "Publishing Java Library to Apache Maven staging" + COMMAND ${GRADLEW_EXECUTABLE} ${GRADLE_OPTS} clean uploadArchives + --console=plain --no-daemon + -Prelease=true + -Pthrift.version=${thrift_VERSION} + "-Pbuild.dir=${CMAKE_CURRENT_BINARY_DIR}/build" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - )") + ) + + # Hook the CMake install process to the results from make ALL. + # This works best when 'make all && sudo make install/fast' is used. + # Using slash to end the source location to avoid copying the directory path. + install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/build/libs/ + DESTINATION ${JAVA_INSTALL_DIR} + FILES_MATCHING PATTERN "libthrift-${thrift_VERSION}.jar") + install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/build/deps/ + DESTINATION ${JAVA_INSTALL_DIR} + FILES_MATCHING PATTERN "*.jar") + install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/build/docs/javadoc/ + DESTINATION ${JAVA_DOC_INSTALL_DIR}) if(BUILD_TESTING) add_test(NAME JavaTest - COMMAND ${Ant_EXECUTABLE} ${ANT_FLAGS} -Dbuild.dir=${CMAKE_CURRENT_BINARY_DIR} -Dthrift.compiler=${THRIFT_COMPILER} -f build.xml test - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + COMMAND ${GRADLEW_EXECUTABLE} ${GRADLE_OPTS} test + --console=plain --no-daemon + -Prelease=true + -Pthrift.version=${thrift_VERSION} + "-Pbuild.dir=${CMAKE_CURRENT_BINARY_DIR}/build" + "-Pthrift.compiler=${THRIFT_COMPILER}" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endif() endif(ANDROID) diff --git a/vendor/git.apache.org/thrift.git/lib/java/Makefile.am b/vendor/git.apache.org/thrift.git/lib/java/Makefile.am index 1c867ae67..65981ca68 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/java/Makefile.am @@ -20,29 +20,53 @@ export CLASSPATH all-local: - $(ANT) $(ANT_FLAGS) + ./gradlew $(GRADLE_OPTS) assemble \ + -Prelease=true \ + -Pthrift.version=$(PACKAGE_VERSION) \ + --console=plain install-exec-hook: - $(ANT) 
$(ANT_FLAGS) install -Dinstall.path=$(DESTDIR)$(JAVA_PREFIX) \ - -Dinstall.javadoc.path=$(DESTDIR)$(docdir)/java + ./gradlew $(GRADLE_OPTS) install \ + -Prelease=true \ + -Pinstall.path=$(DESTDIR)$(JAVA_PREFIX) \ + -Pinstall.javadoc.path=$(DESTDIR)$(docdir)/java \ + -Pthrift.version=$(PACKAGE_VERSION) \ + --console=plain -# Make sure this doesn't fail if ant is not configured. clean-local: - ANT=$(ANT) ; if test -z "$$ANT" ; then ANT=: ; fi ; \ - $$ANT $(ANT_FLAGS) clean + ./gradlew $(GRADLE_OPTS) clean --console=plain precross: $(THRIFT) - $(ANT) $(ANT_FLAGS) compile-test + ./gradlew $(GRADLE_OPTS) shadowJar \ + -Prelease=true \ + -Pthrift.version=$(PACKAGE_VERSION) \ + -Pthrift.compiler=$(THRIFT) \ + --console=plain -check-local: all - $(ANT) $(ANT_FLAGS) test +check-local: $(THRIFT) + ./gradlew $(GRADLE_OPTS) test \ + -Prelease=true \ + -Pthrift.version=$(PACKAGE_VERSION) \ + -Pthrift.compiler=$(THRIFT) \ + --console=plain + +maven-publish: + ./gradlew $(GRADLE_OPTS) uploadArchives \ + -Prelease=true \ + -Pthrift.version=$(PACKAGE_VERSION) \ + --console=plain EXTRA_DIST = \ - build.xml \ - build.properties \ + build.gradle \ + gradle.properties \ + settings.gradle \ + gradle \ + gradlew \ + gradlew.bat \ CMakeLists.txt \ coding_standards.md \ android \ src \ test \ + code_quality_tools \ README.md diff --git a/vendor/git.apache.org/thrift.git/lib/java/README.md b/vendor/git.apache.org/thrift.git/lib/java/README.md index 66bcfa554..1e4aed22a 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/README.md +++ b/vendor/git.apache.org/thrift.git/lib/java/README.md @@ -20,18 +20,59 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-Using Thrift with Java -====================== +Building and installing from source +=================================== + +When using a CMake build from the source distribution on Linux the +easiest way to build and install is this simple command line: + + make all && sudo make install/fast + +It is important to use the install/fast option to eliminate +the automatic rebuild by dependency that causes issues because +the build tooling is designed to work with cached files in the +user home directory during the build process. Instead this builds +the code in the expected local build tree and then uses CMake +install code to copy to the target destination. + +Building Thrift with Gradle without CMake/Autoconf +================================================== The Thrift Java source is not build using the GNU tools, but rather uses -the Apache Ant build system, which tends to be predominant amongst Java +the Gradle build system, which tends to be predominant amongst Java developers. To compile the Java Thrift libraries, simply do the following: - ant + ./gradlew -Yep, that's easy. Look for libthrift.jar in the base directory. +Yep, that's easy. Look for libthrift-.jar in the build/libs directory. + +The default build will run the unit tests which expect a usable +Thrift compiler to exist on the system. You have two choices for +that. + +* Build the Thrift executable from source at the default + location in the source tree. The project is configured + to look for it there. +* Install the published binary distribution to have Thrift + executable in a known location and add the path to the + ~/.gradle/gradle.properties file using the property name + "thrift.compiler". For example this would set the path in + a Windows box if Thrift was installed under C:\Thrift + + thrift.compiler=C:/Thrift/thrift.exe + +To just build the library without running unit tests you simply do this. 
+ + ./gradlew assemble + +To install the library in the local Maven repository location +where other Maven or Gradle builds can reference it simply do this. + + ./gradlew install + +The library will be placed in your home directory under .m2/repository To include Thrift in your applications simply add libthrift.jar to your classpath, or install if in your default system classpath of choice. @@ -39,15 +80,84 @@ classpath, or install if in your default system classpath of choice. Build Thrift behind a proxy: - ant -Dproxy.enabled=1 -Dproxy.host=myproxyhost -Dproxy.user=thriftuser -Dproxy.pass=topsecret + ./gradlew -Dhttp.proxyHost=myproxyhost -Dhttp.proxyPort=8080 -Dhttp.proxyUser=thriftuser -Dhttp.proxyPassword=topsecret or via - ./configure --with-java ANT_FLAGS='-Dproxy.enabled=1 -Dproxy.host=myproxyhost -Dproxy.user=thriftuser -Dproxy.pass=topsecret' + ./configure --with-java GRADLE_OPTS='-Dhttp.proxyHost=myproxyhost -Dhttp.proxyPort=8080 -Dhttp.proxyUser=thriftuser -Dhttp.proxyPassword=topsecret' + + +Unit Test HTML Reports +====================== + +The build will automatically generate an HTML Unit Test report. This can be found +under build/reports/tests/test/index.html. It can be viewed with a browser +directly from that location. + + +Clover Code Coverage for Thrift +=============================== + +The build will optionally generate Clover Code coverage if the Gradle property +`cloverEnabled=true` is set in ~/.gradle/gradle.properties or on the command line +via `-PcloverEnabled=true`. The generated report can be found under the location +build/reports/clover/html/index.html. It can be viewed with a browser +directly from that location. Additionally, a PDF report is generated and is found +under the location build/reports/clover/clover.pdf. 
+ +The following command will build, unit test, and generate Clover reports: + + ./gradlew -PcloverEnabled=true + + +Publishing Maven Artifacts to Maven Central +=========================================== + +The Automake build generates a Makefile that provides the correct parameters +when you run the build provided the configure.ac has been set with the correct +version number. The Gradle build will receive the correct value for the build. +The same applies to the CMake build, the value from the configure.ac file will +be used if you execute these commands: + + make maven-publish -- This is for an Automake Linux build + make MavenPublish -- This is for a CMake generated build + +The uploadArchives task in Gradle is preconfigured with all necessary details +to sign and publish the artifacts from the build to the Apache Maven staging +repository. The task requires the following externally provided properties to +authenticate to the repository and sign the artifacts. The preferred approach +is to create or edit the ~/.gradle/gradle.properties file and add the following +properties to it. + + # Signing key information for artifacts PGP signature (values are examples) + signing.keyId=24875D73 + signing.password=secret + signing.secretKeyRingFile=/Users/me/.gnupg/secring.gpg + + # Apache Maven staging repository user credentials + mavenUser=meMyselfAndI + mavenPassword=MySuperAwesomeSecretPassword + +It is also possible to manually publish using the Gradle build directly. +With the key information and credentials in place the following will generate +if needed the build artifacts and proceed to publish the results. + + ./gradlew -Prelease=true -Pthrift.version=0.11.0 uploadArchives + +It is also possible to override the target repository for the Maven Publication +by using a Gradle property, for example you can publish signed JAR files to your +company internal server if you add this to the command line or in the +~/.gradle/gradle.properties file. 
The URL below assumes a Nexus Repository. + + maven-repository-url=https://my.company.com/service/local/staging/deploy/maven2 + +Or the same on the command line: + + ./gradlew -Pmaven-repository-url=https://my.company.com/service/local/staging/deploy/maven2 -Prelease=true -Pthrift.version=0.11.0 uploadArchives Dependencies ============ -Apache Ant -http://ant.apache.org/ +Gradle +http://gradle.org/ diff --git a/vendor/git.apache.org/thrift.git/lib/java/android/build.gradle b/vendor/git.apache.org/thrift.git/lib/java/android/build.gradle index 4aa2864ee..c9984237b 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/android/build.gradle +++ b/vendor/git.apache.org/thrift.git/lib/java/android/build.gradle @@ -20,31 +20,31 @@ apply plugin: 'com.android.library' android { - compileSdkVersion 23 - buildToolsVersion "23.0.1" - useLibrary 'org.apache.http.legacy' - sourceSets.main.java { - srcDir '../src' - exclude 'org/apache/thrift/transport/TSaslClientTransport.java' - exclude 'org/apache/thrift/transport/TSaslServerTransport.java' - exclude 'org/apache/thrift/transport/TSaslTransport.java' - } + compileSdkVersion 23 + buildToolsVersion "23.0.1" + useLibrary 'org.apache.http.legacy' + sourceSets.main.java { + srcDir '../src' + exclude 'org/apache/thrift/transport/TSaslClientTransport.java' + exclude 'org/apache/thrift/transport/TSaslServerTransport.java' + exclude 'org/apache/thrift/transport/TSaslTransport.java' + } } repositories { - mavenCentral() + mavenCentral() } dependencies { - compile 'org.slf4j:slf4j-api:1.7.13' - compile 'javax.servlet:servlet-api:2.5' - compile 'org.apache.httpcomponents:httpcore:4.4.4' + compile 'org.slf4j:slf4j-api:1.7.13' + compile 'javax.servlet:servlet-api:2.5' + compile 'org.apache.httpcomponents:httpcore:4.4.4' } buildscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.android.tools.build:gradle:1.5.0' - } + repositories { + mavenCentral() + } + dependencies { + classpath 
'com.android.tools.build:gradle:1.5.0' + } } diff --git a/vendor/git.apache.org/thrift.git/lib/java/build.gradle b/vendor/git.apache.org/thrift.git/lib/java/build.gradle new file mode 100644 index 000000000..4302f779d --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/build.gradle @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// Using the legacy plugin classpath for Clover so it can be loaded optionally +buildscript { + repositories { + google() + jcenter() + gradlePluginPortal() + } + + dependencies { + classpath 'com.bmuschko:gradle-clover-plugin:2.2.0' + } +} + +plugins { + id 'java' + id 'maven' + id 'signing' + id 'com.github.johnrengelman.shadow' version '2.0.2' +} + +description = 'Apache Thrift Java Library' + +defaultTasks 'build' + +// Version components for this project +group = property('thrift.groupid') + +// Drop the -dev suffix, we use the SNAPSHOT suffix for non-release versions +def parsedVersion = property('thrift.version').toString().replace('-dev', '') +if (Boolean.parseBoolean(project.release)) { + version = parsedVersion +} else { + version = parsedVersion + '-SNAPSHOT' +} + +// Keeping the rest of the build logic in functional named scripts for clarity +apply from: 'gradle/environment.gradle' +apply from: 'gradle/sourceConfiguration.gradle' +apply from: 'gradle/additionalArtifacts.gradle' +apply from: 'gradle/generateTestThrift.gradle' +apply from: 'gradle/unitTests.gradle' +apply from: 'gradle/cloverCoverage.gradle' +apply from: 'gradle/functionalTests.gradle' +apply from: 'gradle/publishing.gradle' +apply from: 'gradle/codeQualityChecks.gradle' diff --git a/vendor/git.apache.org/thrift.git/lib/java/build.xml b/vendor/git.apache.org/thrift.git/lib/java/build.xml deleted file mode 100644 index 512aec75a..000000000 --- a/vendor/git.apache.org/thrift.git/lib/java/build.xml +++ /dev/null @@ -1,421 +0,0 @@ - - - - - Thrift Build File - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Tests failed! 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/git.apache.org/thrift.git/lib/java/code_quality_tools/findbugs-filter.xml b/vendor/git.apache.org/thrift.git/lib/java/code_quality_tools/findbugs-filter.xml new file mode 100644 index 000000000..8a93b0ad0 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/code_quality_tools/findbugs-filter.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/git.apache.org/thrift.git/lib/java/build.properties b/vendor/git.apache.org/thrift.git/lib/java/gradle.properties similarity index 55% rename from vendor/git.apache.org/thrift.git/lib/java/build.properties rename to vendor/git.apache.org/thrift.git/lib/java/gradle.properties index fd3535186..e99788f0d 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/build.properties +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle.properties @@ -1,31 +1,33 @@ -thrift.version=0.11.0 +# This file is shared currently between this Gradle build and the +# Ant builds for fd303 and JavaScript. Keep the dotted notation for +# the properties to minimize the changes in the dependencies. 
+thrift.version=1.0.0 thrift.groupid=org.apache.thrift release=false -# Jar Versions -mvn.ant.task.version=2.1.3 - # Local Install paths install.path=/usr/local/lib -install.javadoc.path=${install.path} +install.javadoc.path=/usr/local/lib + +# Test execution properties +testPort=9090 + +# Test with Clover Code coverage (disabled by default) +cloverEnabled=false # Maven dependency download locations mvn.repo=http://repo1.maven.org/maven2 apache.repo=https://repository.apache.org/content/repositories/releases -mvn.ant.task.url=${mvn.repo}/org/apache/maven/maven-ant-tasks/${mvn.ant.task.version} -mvn.ant.task.jar=maven-ant-tasks-${mvn.ant.task.version}.jar -# Apache Maven publish +# Apache Maven publish license=http://www.apache.org/licenses/LICENSE-2.0.txt maven-repository-url=https://repository.apache.org/service/local/staging/deploy/maven2 maven-repository-id=apache.releases.https -# Jar Versions -mvn.ant.task.version=2.1.3 - # Dependency versions httpclient.version=4.4.1 httpcore.version=4.4.1 slf4j.version=1.7.12 servlet.version=2.5 - +junit.version=4.12 +mockito.version=1.9.5 diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/common_test_pre_go17.go b/vendor/git.apache.org/thrift.git/lib/java/gradle/additionalArtifacts.gradle similarity index 61% rename from vendor/git.apache.org/thrift.git/lib/go/thrift/common_test_pre_go17.go rename to vendor/git.apache.org/thrift.git/lib/java/gradle/additionalArtifacts.gradle index e6d0c4d9a..201469da1 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/common_test_pre_go17.go +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/additionalArtifacts.gradle @@ -1,5 +1,3 @@ -// +build !go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -19,14 +17,24 @@ * under the License. 
*/ -package thrift +// Following Gradle best practices to keep build logic organized -import "golang.org/x/net/context" +task sourcesJar(type: Jar, group: 'Build') { + description = 'Assembles a jar archive containing the main Java sources.' -type mockProcessor struct { - ProcessFunc func(in, out TProtocol) (bool, TException) + classifier 'sources' + from sourceSets.main.allSource } -func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - return m.ProcessFunc(in, out) +task javadocJar(type: Jar, dependsOn: javadoc, group: 'Build') { + description = 'Assembles a jar archive containing the JavaDoc.' + + classifier 'javadoc' + from javadoc.destinationDir } + +artifacts { + archives sourcesJar + archives javadocJar +} + diff --git a/vendor/git.apache.org/thrift.git/lib/go/test/tests/go17.go b/vendor/git.apache.org/thrift.git/lib/java/gradle/cloverCoverage.gradle similarity index 55% rename from vendor/git.apache.org/thrift.git/lib/go/test/tests/go17.go rename to vendor/git.apache.org/thrift.git/lib/java/gradle/cloverCoverage.gradle index dc3c9d598..cef0e79b1 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/test/tests/go17.go +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/cloverCoverage.gradle @@ -1,5 +1,3 @@ -// +build go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -19,29 +17,32 @@ * under the License. 
*/ -package tests +// Following Gradle best practices to keep build logic organized -import ( - "context" - "fmt" -) +// Keep this as an optional feature for now, disabled by default +if (Boolean.parseBoolean(project.cloverEnabled)) { + apply plugin: 'com.bmuschko.clover' -var defaultCtx = context.Background() + dependencies { + clover 'org.openclover:clover:4.2.+' + } -type FirstImpl struct{} + clover { -func (f *FirstImpl) ReturnOne(ctx context.Context) (r int64, err error) { - return 1, nil + testIncludes = ['**/Test*.java'] + // Exclude the generated test code from code coverage + testExcludes = ['thrift/test/Test*.java'] + + compiler { + encoding = 'UTF-8' + debug = true + } + + report { + html = true + pdf = true + } + } + + build.dependsOn cloverGenerateReport } - -type SecondImpl struct{} - -func (s *SecondImpl) ReturnTwo(ctx context.Context) (r int64, err error) { - return 2, nil -} - -type impl struct{} - -func (i *impl) Hi(ctx context.Context, in int64, s string) (err error) { fmt.Println("Hi!"); return } -func (i *impl) Emptyfunc(ctx context.Context) (err error) { return } -func (i *impl) EchoInt(ctx context.Context, param int64) (r int64, err error) { return param, nil } diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/codeQualityChecks.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/codeQualityChecks.gradle new file mode 100644 index 000000000..9572ca177 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/codeQualityChecks.gradle @@ -0,0 +1,40 @@ + +// ================================================================= +// Configure the Gradle code quality plugins here. 
+// + +apply plugin: 'findbugs' + +findbugs { + ignoreFailures = true + toolVersion = '3.0.1' + sourceSets = [ sourceSets.main ] + effort = 'max' + reportLevel = 'low' + excludeFilter = file('code_quality_tools/findbugs-filter.xml') +} + +tasks.withType(FindBugs) { + reports { + text.enabled = false + html.enabled = true + xml.enabled = false + } +} + +apply plugin: 'pmd' + +pmd { + ignoreFailures = true + toolVersion = '6.0.0' + sourceSets = [ sourceSets.main ] + targetJdk = sourceCompatibility + ruleSets = [ 'java-basic' ] +} + +tasks.withType(Pmd) { + reports { + html.enabled = true + xml.enabled = false + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/environment.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/environment.gradle new file mode 100644 index 000000000..9b7eb1ed6 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/environment.gradle @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// Following Gradle best practices to keep build logic organized + +// Override the build directory if CMake is used (allows for out-of-tree-builds) +if (hasProperty('build.dir')) { + buildDir = file(property('build.dir')) +} + +// In order to remain compatible with other Ant based builds in the system +// we convert the gradle.properties into DSL friendly camelCased properties +ext.installPath = property('install.path') +ext.installJavadocPath = property('install.javadoc.path') + +ext.thriftRoot = file('../..') + +if (hasProperty('thrift.compiler')) { + ext.thriftCompiler = property('thrift.compiler') +} else { + ext.thriftCompiler = "$thriftRoot/compiler/cpp/thrift" +} + +ext.mvnRepo = property('mvn.repo') +ext.apacheRepo = property('apache.repo') +ext.mavenRepositoryUrl = property('maven-repository-url') + +// Versions used in this project +ext.httpclientVersion = property('httpclient.version') +ext.httpcoreVersion = property('httpcore.version') +ext.servletVersion = property('servlet.version') +ext.slf4jVersion = property('slf4j.version') +ext.junitVersion = property('junit.version') +ext.mockitoVersion = property('mockito.version') + +// In this section you declare where to find the dependencies of your project +repositories { + maven { + name 'Maven Central Repository' + url mvnRepo + } + maven { + name 'Apache Maven Repository' + url apacheRepo + } +} + +dependencies { + compile "org.slf4j:slf4j-api:${slf4jVersion}" + compile "org.apache.httpcomponents:httpclient:${httpclientVersion}" + compile "org.apache.httpcomponents:httpcore:${httpcoreVersion}" + compile "javax.servlet:servlet-api:${servletVersion}" + + testCompile "junit:junit:${junitVersion}" + testCompile "org.mockito:mockito-all:${mockitoVersion}" + testRuntime "org.slf4j:slf4j-log4j12:${slf4jVersion}" +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/functionalTests.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/functionalTests.gradle new file mode 100644 index 
000000000..c420d122c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/functionalTests.gradle @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// Following Gradle best practices to keep build logic organized + +// ---------------------------------------------------------------------------- +// Functional testing harness creation. This helps run the cross-check tests. +// The Makefile precross target invokes the shadowJar task and the tests.json +// code is changed to call runclient or runserver as needed. 
+ +// ---------------------------------------------------------------------------- +// Cross Test sources are separated in their own sourceSet +// +sourceSets { + crossTest { + java { + srcDir 'test' + include '**/test/TestClient.java' + include '**/test/TestServer.java' + include '**/test/TestNonblockingServer.java' + } + } +} + +configurations { + crossTestCompile { extendsFrom testCompile } + crossTestRuntime { extendsFrom crossTestCompile, testRuntime } +} + +dependencies { + crossTestCompile sourceSets.main.output + crossTestCompile sourceSets.test.output +} + +// I am using shadow plugin to make a self contained functional test Uber JAR that +// eliminates startup problems with wrapping the cross-check harness in Gradle. +// This is used by the runner scripts as the single classpath entry which +// allows the process to be as lightweight as it can. +shadowJar { + description = 'Assemble a test JAR file for cross-check execution' + // make sure the runners are created when this runs + dependsOn 'generateRunnerScriptForClient', 'generateRunnerScriptForServer', 'generateRunnerScriptForNonblockingServer' + + baseName = 'functionalTest' + destinationDir = file("$buildDir/functionalTestJar") + classifier = null + + // We do not need a version number for this internal jar + version = null + + // Bundle the complete set of unit test classes including generated code + // and the runtime dependencies in one JAR to expedite execution. 
+ from sourceSets.test.output + from sourceSets.crossTest.output + configurations = [project.configurations.testRuntime] +} + +// Common script runner configuration elements +def scriptExt = '' +def execExt = '' +def scriptHead = '#!/bin/bash' +def args = '$*' + +// Although this is marked internal it is an available and stable interface +if (org.gradle.internal.os.OperatingSystem.current().windows) { + scriptExt = '.bat' + execExt = '.exe' + scriptHead = '@echo off' + args = '%*' +} + +// The Java executable to use with the runner scripts +def javaExe = file("${System.getProperty('java.home')}/bin/java${execExt}").canonicalPath +// The common Uber jar path +def jarPath = shadowJar.archivePath.canonicalPath +def trustStore = file('test/.truststore').canonicalPath +def keyStore = file('test/.keystore').canonicalPath + +task generateRunnerScriptForClient(group: 'Build') { + description = 'Generate a runner script for cross-check tests with TestClient' + + def clientFile = file("$buildDir/runclient${scriptExt}") + + def runClientText = """\ +${scriptHead} + +"${javaExe}" -cp "$jarPath" "-Djavax.net.ssl.trustStore=$trustStore" -Djavax.net.ssl.trustStorePassword=thrift org.apache.thrift.test.TestClient $args +""" + inputs.property 'runClientText', runClientText + outputs.file clientFile + + doLast { + clientFile.parentFile.mkdirs() + clientFile.text = runClientText + clientFile.setExecutable(true, false) + } +} + +task generateRunnerScriptForServer(group: 'Build') { + description = 'Generate a runner script for cross-check tests with TestServer' + + def serverFile = file("$buildDir/runserver${scriptExt}") + + def runServerText = """\ +${scriptHead} + +"${javaExe}" -cp "$jarPath" "-Djavax.net.ssl.keyStore=$keyStore" -Djavax.net.ssl.keyStorePassword=thrift org.apache.thrift.test.TestServer $args +""" + + inputs.property 'runServerText', runServerText + outputs.file serverFile + + doLast { + serverFile.parentFile.mkdirs() + serverFile.text = runServerText + 
serverFile.setExecutable(true, false) + } +} + +task generateRunnerScriptForNonblockingServer(group: 'Build') { + description = 'Generate a runner script for cross-check tests with TestNonblockingServer' + + def serverFile = file("$buildDir/runnonblockingserver${scriptExt}") + + def runServerText = """\ +${scriptHead} + +"${javaExe}" -cp "$jarPath" "-Djavax.net.ssl.keyStore=$keyStore" -Djavax.net.ssl.keyStorePassword=thrift org.apache.thrift.test.TestNonblockingServer $args +""" + + inputs.property 'runServerText', runServerText + outputs.file serverFile + + doLast { + serverFile.parentFile.mkdirs() + serverFile.text = runServerText + serverFile.setExecutable(true, false) + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/generateTestThrift.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/generateTestThrift.gradle new file mode 100644 index 000000000..c3479179b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/generateTestThrift.gradle @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// Following Gradle best practices to keep build logic organized + +// Generated code locations for Unit tests +ext.genSrc = file("$buildDir/gen-java") +ext.genBeanSrc = file("$buildDir/gen-javabean") +ext.genReuseSrc = file("$buildDir/gen-javareuse") +ext.genFullCamelSrc = file("$buildDir/gen-fullcamel") + +// Add the generated code directories to the test source set +sourceSets { + test.java.srcDirs genSrc, genBeanSrc, genReuseSrc, genFullCamelSrc +} + +// ---------------------------------------------------------------------------- +// Code generation for Unit Testing + +// A callable closure to make this easier +ext.thriftCompile = { Task task, String thriftFileName, String generator = 'java', File outputDir = genSrc -> + def thriftFile = file("$thriftRoot/test/$thriftFileName") + assert thriftFile.exists() + + task.inputs.file thriftFile + task.outputs.dir outputDir + + task.doLast { + outputDir.mkdirs() + def result = exec { + executable file(thriftCompiler) + args '--gen', generator + args '-out', outputDir + args thriftFile + standardOutput = task.outputBuffer + errorOutput = task.outputBuffer + ignoreExitValue = true + } + if (result.exitValue != 0) { + // Only show the Thrift compiler output on failures, cuts down on noise! 
+ println task.outputBuffer.toString() + result.rethrowFailure() + } + } +} + +task generate(group: 'Build') { + description = 'Generate all unit test Thrift sources' + compileTestJava.dependsOn it +} + +task generateJava(group: 'Build') { + description = 'Generate the thrift gen-java source' + generate.dependsOn it + + ext.outputBuffer = new ByteArrayOutputStream() + + thriftCompile(it, 'ThriftTest.thrift') + thriftCompile(it, 'JavaTypes.thrift') + thriftCompile(it, 'DebugProtoTest.thrift') + thriftCompile(it, 'DoubleConstantsTest.thrift') + thriftCompile(it, 'OptionalRequiredTest.thrift') + thriftCompile(it, 'ManyOptionals.thrift') + thriftCompile(it, 'JavaDeepCopyTest.thrift') + thriftCompile(it, 'EnumContainersTest.thrift') +} + +task generateBeanJava(group: 'Build') { + description = 'Generate the thrift gen-javabean source' + generate.dependsOn it + + ext.outputBuffer = new ByteArrayOutputStream() + + thriftCompile(it, 'JavaBeansTest.thrift', 'java:beans,nocamel', genBeanSrc) +} + +task generateReuseJava(group: 'Build') { + description = 'Generate the thrift gen-javareuse source' + generate.dependsOn it + + ext.outputBuffer = new ByteArrayOutputStream() + + thriftCompile(it, 'FullCamelTest.thrift', 'java:fullcamel', genFullCamelSrc) +} + +task generateFullCamelJava(group: 'Build') { + description = 'Generate the thrift gen-fullcamel source' + generate.dependsOn it + + ext.outputBuffer = new ByteArrayOutputStream() + + thriftCompile(it, 'ReuseObjects.thrift', 'java:reuse-objects', genReuseSrc) +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/publishing.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/publishing.gradle new file mode 100644 index 000000000..961d58f78 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/publishing.gradle @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// Following Gradle best practices to keep build logic organized + +// ---------------------------------------------------------------------------- +// Installation subtasks, not used currently, we use "make install/fast" +task installDist(type: Copy, group: 'Install') { + description = "Copy Thrift JAR and dependencies into $installPath location" + + destinationDir = file(installPath) + + from jar + from configurations.compile +} + +task installJavadoc(type: Copy, group: 'Install', dependsOn: javadoc) { + description = "Install Thrift JavaDoc into $installJavadocPath location" + + destinationDir = file(installJavadocPath) + + from javadoc.destinationDir +} + +// This is not needed by Gradle builds but the remaining Ant builds seem to +// need access to the generated test classes for Thrift unit tests so we +// assist them to use it this way. 
+task copyDependencies(type: Copy, group: 'Build') { + description = 'Copy runtime dependencies in a common location for other Ant based projects' + project.assemble.dependsOn it + + destinationDir = file("$buildDir/deps") + from configurations.testRuntime + // exclude some very specific unit test dependencies + exclude '**/junit*.jar', '**/mockito*.jar', '**/hamcrest*.jar' +} + +// ---------------------------------------------------------------------------- +// Allow this configuration to be shared between install and uploadArchives tasks +def configurePom(pom) { + pom.project { + name 'Apache Thrift' + description 'Thrift is a software framework for scalable cross-language services development.' + packaging 'jar' + url 'http://thrift.apache.org' + + scm { + url 'https://git-wip-us.apache.org/repos/asf?p=thrift.git' + connection 'scm:git:https://git-wip-us.apache.org/repos/asf/thrift.git' + developerConnection 'scm:git:https://git-wip-us.apache.org/repos/asf/thrift.git' + } + + licenses { + license { + name 'The Apache Software License, Version 2.0' + url "${project.license}" + distribution 'repo' + } + } + + developers { + developer { + id 'dev' + name 'Apache Thrift Developers' + email 'dev@thrift.apache.org' + } + } + } + + pom.whenConfigured { + // Fixup the scope for servlet-api to be 'provided' instead of 'compile' + dependencies.find { dep -> dep.groupId == 'javax.servlet' && dep.artifactId == 'servlet-api' }.with { + // it.optional = true + it.scope = 'provided' + } + } +} + +install { + repositories.mavenInstaller { + configurePom(pom) + } +} + +uploadArchives { + dependsOn test // make sure we run unit tests when publishing + repositories.mavenDeployer { + // signPom will silently do nothing when no signing information is provided + beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) } + repository(url: project.mavenRepositoryUrl) { + if (project.hasProperty('mavenUser') && project.hasProperty('mavenPassword')) { + 
authentication(userName: mavenUser, password: mavenPassword) + } + } + configurePom(pom) + } +} + +// Signing configuration, optional, only when release and uploadArchives is activated +signing { + required { !version.endsWith("SNAPSHOT") && gradle.taskGraph.hasTask("uploadArchives") } + sign configurations.archives +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/sourceConfiguration.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/sourceConfiguration.gradle new file mode 100644 index 000000000..decc6a275 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/sourceConfiguration.gradle @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// Following Gradle best practices to keep build logic organized + +// ---------------------------------------------------------------------------- +// source sets for main and test sources +sourceSets { + main { + java { + srcDir 'src' + } + } + test { + java { + srcDir 'test' + // see functionalTests.gradle for these files + exclude '**/test/TestClient.java' + exclude '**/test/TestServer.java' + exclude '**/test/TestNonblockingServer.java' + } + resources { + srcDir 'test' + include 'log4j.properties' + } + } +} + +// ---------------------------------------------------------------------------- +// Compiler configuration details + +sourceCompatibility = '1.6' +targetCompatibility = '1.6' + +tasks.withType(JavaCompile) { + options.encoding = 'UTF-8' + options.debug = true + options.deprecation = true + // options.compilerArgs.addAll('-Xlint:unchecked') +} + +// ---------------------------------------------------------------------------- +// Jar packaging details +processResources { + into('META-INF') { + from "$thriftRoot/LICENSE" + from "$thriftRoot/NOTICE" + rename('(.+)', '$1.txt') + } +} + +jar { + project.test.dependsOn it + manifest { + attributes([ + "Implementation-Version": "${project.version}", + "Bundle-ManifestVersion": "2", + "Bundle-SymbolicName": "${project.group}", + "Bundle-Name": "Apache Thrift", + "Bundle-Version": "${project.version}", + "Bundle-Description": "Apache Thrift library", + "Bundle-License": "${project.license}", + "Bundle-ActivationPolicy": "lazy", + "Export-Package": 
"${project.group}.async;uses:=\"${project.group}.protocol,${project.group}.transport,org.slf4j,${project.group}\";version=\"${version}\",${project.group}.protocol;uses:=\"${project.group}.transport,${project.group},${project.group}.scheme\";version=\"${version}\",${project.group}.server;uses:=\"${project.group}.transport,${project.group}.protocol,${project.group},org.slf4j,javax.servlet,javax.servlet.http\";version=\"${version}\",${project.group}.transport;uses:=\"${project.group}.protocol,${project.group},org.apache.http.client,org.apache.http.params,org.apache.http.entity,org.apache.http.client.methods,org.apache.http,org.slf4j,javax.net.ssl,javax.net,javax.security.sasl,javax.security.auth.callback\";version=\"${version}\",${project.group};uses:=\"${project.group}.protocol,${project.group}.async,${project.group}.server,${project.group}.transport,org.slf4j,org.apache.log4j,${project.group}.scheme\";version=\"${version}\",${project.group}.meta_data;uses:=\"${project.group}\";version=\"${version}\",${project.group}.scheme;uses:=\"${project.group}.protocol,${project.group}\";version=\"${version}\"", + "Import-Package": "javax.net,javax.net.ssl,javax.security.auth.callback,javax.security.sasl,javax.servlet;resolution:=optional,javax.servlet.http;resolution:=optional,org.slf4j;resolution:=optional;version=\"[1.4,2)\",org.apache.http.client;resolution:=optional,org.apache.http.params;resolution:=optional,org.apache.http.entity;resolution:=optional,org.apache.http.client.methods;resolution:=optional,org.apache.http;resolution:=optional" + ]) + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/unitTests.gradle b/vendor/git.apache.org/thrift.git/lib/java/gradle/unitTests.gradle new file mode 100644 index 000000000..61f2fbdeb --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/unitTests.gradle @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// Following Gradle best practices to keep build logic organized + +// Bundle the test classes in a JAR for other Ant based builds +task testJar(type: Jar, group: 'Build') { + description = 'Assembles a jar archive containing the test classes.' + project.test.dependsOn it + + classifier 'test' + from sourceSets.test.output +} + +// ---------------------------------------------------------------------------- +// Unit test tasks and configurations + +// Help the up to date algorithm to make these tests done +ext.markTaskDone = { task -> + def buildFile = file("$buildDir/${task.name}.flag") + task.inputs.files task.classpath + task.outputs.file buildFile + task.doLast { + buildFile.text = 'Passed!' 
+ } +} + +task deprecatedEqualityTest(type: JavaExec, group: 'Verification') { + description = 'Run the non-JUnit test suite ' + classpath = sourceSets.test.runtimeClasspath + main 'org.apache.thrift.test.EqualityTest' + markTaskDone(it) +} + +task deprecatedJavaBeansTest(type: JavaExec, group: 'Verification') { + description = 'Run the non-JUnit test suite ' + classpath = sourceSets.test.runtimeClasspath + main 'org.apache.thrift.test.JavaBeansTest' + markTaskDone(it) +} + +// Main Unit Test task configuration +test { + description="Run the full test suite" + dependsOn deprecatedEqualityTest, deprecatedJavaBeansTest + + // Allow repeating tests even after successful execution + if (project.hasProperty('rerunTests')) { + outputs.upToDateWhen { false } + } + + include '**/Test*.class' + exclude '**/Test*\$*.class' + + maxHeapSize = '512m' + forkEvery = 1 + + systemProperties = [ + 'build.test': "${compileTestJava.destinationDir}", + 'test.port': "${testPort}", + 'javax.net.ssl.trustStore': "${projectDir}/test/.truststore", + 'javax.net.ssl.trustStorePassword': 'thrift', + 'javax.net.ssl.keyStore': "${projectDir}/test/.keystore", + 'javax.net.ssl.keyStorePassword': 'thrift' + ] +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradle/wrapper/gradle-wrapper.properties b/vendor/git.apache.org/thrift.git/lib/java/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 000000000..2c2bbe5f9 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-4.4.1-bin.zip diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradlew b/vendor/git.apache.org/thrift.git/lib/java/gradlew new file mode 100755 index 000000000..cccdd3d51 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradlew @@ -0,0 
+1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 
+ +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" 
"$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/vendor/git.apache.org/thrift.git/lib/java/gradlew.bat b/vendor/git.apache.org/thrift.git/lib/java/gradlew.bat new file mode 100644 index 000000000..f9553162f --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/go17.go b/vendor/git.apache.org/thrift.git/lib/java/settings.gradle similarity index 90% rename from vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/go17.go rename to vendor/git.apache.org/thrift.git/lib/java/settings.gradle index a6003a917..c9bd8bc0e 100644 --- a/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/go17.go +++ b/vendor/git.apache.org/thrift.git/lib/java/settings.gradle @@ -1,5 +1,3 @@ -// +build go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -19,8 +17,4 @@ * under the License. */ -package main - -import "context" - -var defaultCtx = context.Background() +rootProject.name = 'libthrift' diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/AsyncProcessFunction.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/AsyncProcessFunction.java index 550ebd532..483c8d054 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/AsyncProcessFunction.java +++ b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/AsyncProcessFunction.java @@ -20,7 +20,6 @@ package org.apache.thrift; import org.apache.thrift.async.AsyncMethodCallback; import org.apache.thrift.protocol.TMessage; -import org.apache.thrift.protocol.TMessageType; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.server.AbstractNonblockingServer; diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TAsyncProcessor.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TAsyncProcessor.java index 0a069ea00..533e74d86 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TAsyncProcessor.java +++ 
b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TAsyncProcessor.java @@ -18,14 +18,7 @@ */ package org.apache.thrift; -import org.apache.thrift.protocol.*; - -import org.apache.thrift.server.AbstractNonblockingServer.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.Map; +import org.apache.thrift.server.AbstractNonblockingServer.AsyncFrameBuffer; public interface TAsyncProcessor { /** diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TEnumHelper.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TEnumHelper.java index c17d661c2..fbc778751 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TEnumHelper.java +++ b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/TEnumHelper.java @@ -19,7 +19,6 @@ package org.apache.thrift; -import java.lang.InstantiationException; import java.lang.NoSuchMethodException; import java.lang.IllegalAccessException; import java.lang.reflect.InvocationTargetException; diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TExtensibleServlet.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TExtensibleServlet.java index d328dd67b..75082c0f7 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TExtensibleServlet.java +++ b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TExtensibleServlet.java @@ -63,7 +63,7 @@ public abstract class TExtensibleServlet extends HttpServlet { * Returns the appropriate {@link TProcessor}. This will be called once just * after the {@link #init()} method * - * @return + * @return the appropriate {@link TProcessor} */ protected abstract TProcessor getProcessor(); @@ -71,7 +71,7 @@ public abstract class TExtensibleServlet extends HttpServlet { * Returns the appropriate in {@link TProtocolFactory}. 
This will be called * once just after the {@link #init()} method * - * @return + * @return the appropriate in {@link TProtocolFactory} */ protected abstract TProtocolFactory getInProtocolFactory(); @@ -79,7 +79,7 @@ public abstract class TExtensibleServlet extends HttpServlet { * Returns the appropriate out {@link TProtocolFactory}. This will be called * once just after the {@link #init()} method * - * @return + * @return the appropriate out {@link TProtocolFactory} */ protected abstract TProtocolFactory getOutProtocolFactory(); diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TThreadPoolServer.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TThreadPoolServer.java index 90d5e5b04..53c20e98f 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TThreadPoolServer.java +++ b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/server/TThreadPoolServer.java @@ -145,22 +145,40 @@ public class TThreadPoolServer extends TServer { executorQueue); } - - public void serve() { - try { + protected ExecutorService getExecutorService() { + return executorService_; + } + + protected boolean preServe() { + try { serverTransport_.listen(); } catch (TTransportException ttx) { LOGGER.error("Error occurred during listening.", ttx); - return; + return false; } // Run the preServe event if (eventHandler_ != null) { eventHandler_.preServe(); } - stopped_ = false; setServing(true); + + return true; + } + + public void serve() { + if (!preServe()) { + return; + } + + execute(); + waitForShutdown(); + + setServing(false); + } + + protected void execute() { int failureCount = 0; while (!stopped_) { try { @@ -213,8 +231,10 @@ public class TThreadPoolServer extends TServer { } } } - - executorService_.shutdown(); + } + + protected void waitForShutdown() { + executorService_.shutdown(); // Loop until awaitTermination finally does return without a interrupted // exception. 
If we don't do this, then we'll shut down prematurely. We want @@ -232,7 +252,6 @@ public class TThreadPoolServer extends TServer { now = newnow; } } - setServing(false); } public void stop() { diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSSLTransportFactory.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSSLTransportFactory.java index 9c60ed1ab..2232a315c 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSSLTransportFactory.java +++ b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSSLTransportFactory.java @@ -186,7 +186,11 @@ public class TSSLTransportFactory { if (params.isTrustStoreSet) { tmf = TrustManagerFactory.getInstance(params.trustManagerType); KeyStore ts = KeyStore.getInstance(params.trustStoreType); - in = getStoreAsStream(params.trustStore); + if (params.trustStoreStream != null) { + in = params.trustStoreStream; + } else { + in = getStoreAsStream(params.trustStore); + } ts.load(in, (params.trustPass != null ? 
params.trustPass.toCharArray() : null)); tmf.init(ts); @@ -195,7 +199,11 @@ public class TSSLTransportFactory { if (params.isKeyStoreSet) { kmf = KeyManagerFactory.getInstance(params.keyManagerType); KeyStore ks = KeyStore.getInstance(params.keyStoreType); - is = getStoreAsStream(params.keyStore); + if (params.keyStoreStream != null) { + is = params.keyStoreStream; + } else { + is = getStoreAsStream(params.keyStore); + } ks.load(is, params.keyPass.toCharArray()); kmf.init(ks, params.keyPass.toCharArray()); } @@ -273,10 +281,12 @@ public class TSSLTransportFactory { public static class TSSLTransportParameters { protected String protocol = "TLS"; protected String keyStore; + protected InputStream keyStoreStream; protected String keyPass; protected String keyManagerType = KeyManagerFactory.getDefaultAlgorithm(); protected String keyStoreType = "JKS"; protected String trustStore; + protected InputStream trustStoreStream; protected String trustPass; protected String trustManagerType = TrustManagerFactory.getDefaultAlgorithm(); protected String trustStoreType = "JKS"; @@ -332,7 +342,20 @@ public class TSSLTransportFactory { } isKeyStoreSet = true; } - + + /** + * Set the keystore, password, certificate type and the store type + * + * @param keyStoreStream Keystore content input stream + * @param keyPass Keystore password + * @param keyManagerType The default is X509 + * @param keyStoreType The default is JKS + */ + public void setKeyStore(InputStream keyStoreStream, String keyPass, String keyManagerType, String keyStoreType) { + this.keyStoreStream = keyStoreStream; + setKeyStore("", keyPass, keyManagerType, keyStoreType); + } + /** * Set the keystore and password * @@ -342,7 +365,17 @@ public class TSSLTransportFactory { public void setKeyStore(String keyStore, String keyPass) { setKeyStore(keyStore, keyPass, null, null); } - + + /** + * Set the keystore and password + * + * @param keyStoreStream Keystore content input stream + * @param keyPass Keystore password + */ + 
public void setKeyStore(InputStream keyStoreStream, String keyPass) { + setKeyStore(keyStoreStream, keyPass, null, null); + } + /** * Set the truststore, password, certificate type and the store type * @@ -362,6 +395,19 @@ public class TSSLTransportFactory { } isTrustStoreSet = true; } + + /** + * Set the truststore, password, certificate type and the store type + * + * @param trustStoreStream Truststore content input stream + * @param trustPass Truststore password + * @param trustManagerType The default is X509 + * @param trustStoreType The default is JKS + */ + public void setTrustStore(InputStream trustStoreStream, String trustPass, String trustManagerType, String trustStoreType) { + this.trustStoreStream = trustStoreStream; + setTrustStore("", trustPass, trustManagerType, trustStoreType); + } /** * Set the truststore and password @@ -372,6 +418,16 @@ public class TSSLTransportFactory { public void setTrustStore(String trustStore, String trustPass) { setTrustStore(trustStore, trustPass, null, null); } + + /** + * Set the truststore and password + * + * @param trustStoreStream Truststore content input stream + * @param trustPass Truststore password + */ + public void setTrustStore(InputStream trustStoreStream, String trustPass) { + setTrustStore(trustStoreStream, trustPass, null, null); + } /** * Set if client authentication is required @@ -380,6 +436,6 @@ public class TSSLTransportFactory { */ public void requireClientAuth(boolean clientAuth) { this.clientAuth = clientAuth; - } - } + } + } } diff --git a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSaslTransport.java b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSaslTransport.java index a94d9a776..bbd3f9a34 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSaslTransport.java +++ b/vendor/git.apache.org/thrift.git/lib/java/src/org/apache/thrift/transport/TSaslTransport.java @@ -287,7 +287,7 @@ abstract class 
TSaslTransport extends TTransport { if (message.status == NegotiationStatus.COMPLETE && getRole() == SaslRole.CLIENT) { LOGGER.debug("{}: All done!", getRole()); - break; + continue; } sendSaslMessage(sasl.isComplete() ? NegotiationStatus.COMPLETE : NegotiationStatus.OK, @@ -295,8 +295,6 @@ abstract class TSaslTransport extends TTransport { } LOGGER.debug("{}: Main negotiation loop complete", getRole()); - assert sasl.isComplete(); - // If we're the client, and we're complete, but the server isn't // complete yet, we need to wait for its response. This will occur // with ANONYMOUS auth, for example, where we send an initial response diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/Fixtures.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/Fixtures.java index 9f28124b7..81671d8de 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/Fixtures.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/Fixtures.java @@ -277,7 +277,7 @@ public class Fixtures { nesting = new Nesting(bonk, oneOfEach); holyMoley = new HolyMoley(); - ArrayList big = new ArrayList(); + List big = new ArrayList(); big.add(new OneOfEach(oneOfEach)); big.add(nesting.my_ooe); holyMoley.setBig(big); diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestRenderedDoubleConstants.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestRenderedDoubleConstants.java new file mode 100644 index 000000000..d691fe356 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestRenderedDoubleConstants.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.thrift; + +import java.util.List; +import junit.framework.TestCase; +import static org.junit.Assert.*; +import org.junit.Test; +import thrift.test.DoubleConstantsTestConstants; + +public class TestRenderedDoubleConstants extends TestCase { + private static final double EPSILON = 0.0000001; + private static final String ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST = + "failed to verify a double constant generated by Thrift (expected = %f, got = %f)"; + private static final String ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST = + "failed to verify a list item by Thrift (expected = %f, got = %f)"; + private static final String ASSERTION_MESSAGE_FOR_TYPE_CHECKS = + "the rendered variable with name %s is not of double type"; + + // to make sure lists containing doubles are generated correctly + public void testRenderedDoubleList() throws Exception { + final double[] EXPECTED_LIST = + {1d,-100d,100d,9223372036854775807d,-9223372036854775807d,3.14159265359,1000000.1,-1000000.1,1.7e+308, + -1.7e+308,9223372036854775816.43,-9223372036854775816.43}; + assertEquals(EXPECTED_LIST.length, DoubleConstantsTestConstants.DOUBLE_LIST_TEST.size()); + for (int i = 0; i < EXPECTED_LIST.length; ++i) { + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST, + EXPECTED_LIST[i], + DoubleConstantsTestConstants.DOUBLE_LIST_TEST.get(i)), + 
EXPECTED_LIST[i], DoubleConstantsTestConstants.DOUBLE_LIST_TEST.get(i), EPSILON); + } + } + + // to make sure the variables inside Thrift files are generated correctly + public void testRenderedDoubleConstants() throws Exception { + final double EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0; + final double EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0; + final double EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0; + final double EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0; + final double EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359; + final double EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1; + final double EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1; + final double EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308; + final double EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43; + final double EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308; + final double EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43; + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT, + 
DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, 
EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, EPSILON); + assertEquals( + String.format( + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST), + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, EPSILON); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST)); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST)); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST)); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST)); + assertTrue( + 
String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST)); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST)); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST)); + //assertTrue( + // String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST"), + // Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST)); + assertTrue( + String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST"), + Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST)); + //assertTrue( + // String.format(ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST"), + // Double.class.isInstance(DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST)); + assertTrue( + String.format( + ASSERTION_MESSAGE_FOR_TYPE_CHECKS, "DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST"), + Double.class.isInstance( + DoubleConstantsTestConstants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST)); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestReuse.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestReuse.java index db16c74ab..b44abd0d2 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestReuse.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/TestReuse.java @@ -21,10 +21,7 @@ package org.apache.thrift; import 
java.util.HashSet; -import junit.framework.TestCase; - import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TType; import thrift.test.Reuse; diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTProtocolUtil.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTProtocolUtil.java index 199c70753..89cf5366e 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTProtocolUtil.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTProtocolUtil.java @@ -18,24 +18,10 @@ */ package org.apache.thrift.protocol; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; - import junit.framework.TestCase; -import org.apache.thrift.Fixtures; -import org.apache.thrift.TBase; -import org.apache.thrift.TDeserializer; -import org.apache.thrift.TException; import org.apache.thrift.TSerializer; -import org.apache.thrift.transport.TMemoryBuffer; -import thrift.test.CompactProtoTestStruct; -import thrift.test.HolyMoley; -import thrift.test.Nesting; -import thrift.test.OneOfEach; -import thrift.test.Srv; import thrift.test.GuessProtocolStruct; public class TestTProtocolUtil extends TestCase { diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTSimpleJSONProtocol.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTSimpleJSONProtocol.java index 0b9c73286..b8c465748 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTSimpleJSONProtocol.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/protocol/TestTSimpleJSONProtocol.java @@ -84,7 +84,7 @@ public class TestTSimpleJSONProtocol extends TestCase { struct.unsetDouble_byte_map(); struct.unsetString_byte_map(); struct.write(proto); - 
assertEquals("{\"a_byte\":127,\"a_i16\":32000,\"a_i32\":1000000000,\"a_i64\":1099511627775,\"a_double\":5.6789,\"a_string\":\"my string\",\"a_binary\":\"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\",\"true_field\":1,\"false_field\":0,\"empty_struct_field\":{},\"byte_list\":[-127,-1,0,1,127],\"i16_list\":[-1,0,1,32767],\"i32_list\":[-1,0,255,65535,16777215,2147483647],\"i64_list\":[-1,0,255,65535,16777215,4294967295,1099511627775,281474976710655,72057594037927935,9223372036854775807],\"double_list\":[0.1,0.2,0.3],\"string_list\":[\"first\",\"second\",\"third\"],\"boolean_list\":[1,1,1,0,0,0],\"struct_list\":[{},{}],\"i32_set\":[1,2,3],\"boolean_set\":[0,1],\"struct_set\":[{}],\"byte_byte_map\":{\"1\":2},\"boolean_byte_map\":{\"0\":0,\"1\":1},\"byte_i16_map\":{\"1\":1,\"2\":-1,\"3\":32767},\"byte_i32_map\":{\"1\":1,\"2\":-1,\"3\":2147483647},\"byte_i64_map\":{\"1\":1,\"2\":-1,\"3\":9223372036854775807},\"byte_double_map\":{\"1\":0.1,\"2\":-0.1,\"3\":1000000.0},\"byte_string_map\":{\"1\":\"\",\"2\":\"blah\",\"3\":\"loooooooooooooong string\"},\"byte_boolean_map\":{\"1\":1,\"2\":0},\"byte_map_map\":{\"0\":{},\"1\":{\"1\":1},\"2\":{\"1\":1,\"2\":2}},\"byte_set_map\":{\"0\":[],\"1\":[1],\"2\":[1,2]},\"byte_list_map\":{\"0\":[],\"1\":[1],\"2\":[1,2]}}", bufToString()); + assertEquals("{\"a_byte\":127,\"a_i16\":32000,\"a_i32\":1000000000,\"a_i64\":1099511627775,\"a_double\":5.6789,\"a_string\":\"my 
string\",\"a_binary\":\"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\",\"true_field\":1,\"false_field\":0,\"empty_struct_field\":{},\"byte_list\":[-127,-1,0,1,127],\"i16_list\":[-1,0,1,32767],\"i32_list\":[-1,0,255,65535,16777215,2147483647],\"i64_list\":[-1,0,255,65535,16777215,4294967295,1099511627775,281474976710655,72057594037927935,9223372036854775807],\"double_list\":[0.1,0.2,0.3],\"string_list\":[\"first\",\"second\",\"third\"],\"boolean_list\":[1,1,1,0,0,0],\"struct_list\":[{},{}],\"i32_set\":[1,2,3],\"boolean_set\":[0,1],\"struct_set\":[{}],\"byte_byte_map\":{\"1\":2},\"boolean_byte_map\":{\"0\":0,\"1\":1},\"byte_i16_map\":{\"1\":1,\"2\":-1,\"3\":32767},\"byte_i32_map\":{\"1\":1,\"2\":-1,\"3\":2147483647},\"byte_i64_map\":{\"1\":1,\"2\":-1,\"3\":9223372036854775807},\"byte_double_map\":{\"1\":0.1,\"2\":-0.1,\"3\":1000000.1},\"byte_string_map\":{\"1\":\"\",\"2\":\"blah\",\"3\":\"loooooooooooooong string\"},\"byte_boolean_map\":{\"1\":1,\"2\":0},\"byte_map_map\":{\"0\":{},\"1\":{\"1\":1},\"2\":{\"1\":1,\"2\":2}},\"byte_set_map\":{\"0\":[],\"1\":[1],\"2\":[1,2]},\"byte_list_map\":{\"0\":[],\"1\":[1],\"2\":[1,2]}}", bufToString()); } public void testThrowsOnCollectionKeys() throws TException { diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/server/ServerTestBase.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/server/ServerTestBase.java index e245963d6..1dee22d17 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/server/ServerTestBase.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/server/ServerTestBase.java @@ -267,7 +267,7 @@ public abstract class ServerTestBase extends TestCase { System.out.println("testOneway(" + Integer.toString(sleepFor) + ") => sleeping..."); try { - Thread.sleep(sleepFor * 1000); + Thread.sleep(sleepFor * SLEEP_DELAY); System.out.println("Done sleeping!"); } catch (InterruptedException ie) { throw new 
RuntimeException(ie); @@ -282,6 +282,7 @@ public abstract class ServerTestBase extends TestCase { public static final String HOST = "localhost"; public static final int PORT = Integer.valueOf( System.getProperty("test.port", "9090")); + protected static final int SLEEP_DELAY = 1000; protected static final int SOCKET_TIMEOUT = 1500; private static final Xtruct XSTRUCT = new Xtruct("Zero", (byte) 1, -3, -5); private static final Xtruct2 XSTRUCT2 = new Xtruct2((byte)1, XSTRUCT, 5); @@ -388,7 +389,7 @@ public abstract class ServerTestBase extends TestCase { public void testIt() throws Exception { for (TProtocolFactory protoFactory : getProtocols()) { - TProcessor processor = useAsyncProcessor() ? new ThriftTest.AsyncProcessor(new AsyncTestHandler()) : new ThriftTest.Processor(new TestHandler()); + TProcessor processor = useAsyncProcessor() ? new ThriftTest.AsyncProcessor(new AsyncTestHandler()) : new ThriftTest.Processor(new TestHandler()); startServer(processor, protoFactory); @@ -537,7 +538,7 @@ public abstract class ServerTestBase extends TestCase { public void testTransportFactory() throws Exception { for (TProtocolFactory protoFactory : getProtocols()) { TestHandler handler = new TestHandler(); - ThriftTest.Processor processor = new ThriftTest.Processor(handler); + ThriftTest.Processor processor = new ThriftTest.Processor(handler); final CallCountingTransportFactory factory = new CallCountingTransportFactory(new TFramedTransport.Factory()); diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactory.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactory.java index 478407a2f..032c2eb71 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactory.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactory.java @@ -45,6 +45,10 @@ public class 
TestTSSLTransportFactory extends ServerTestBase { throws Exception { return TSSLTransportFactory.getClientSocket(HOST, PORT); } + + protected TServerSocket getServerTransport() throws Exception { + return TSSLTransportFactory.getServerSocket(PORT); + } @Override public void startServer(final TProcessor processor, final TProtocolFactory protoFactory, final TTransportFactory factory) @@ -52,11 +56,11 @@ public class TestTSSLTransportFactory extends ServerTestBase { serverThread = new Thread() { public void run() { try { - TServerTransport serverTransport = TSSLTransportFactory.getServerSocket(PORT); + TServerTransport serverTransport = getServerTransport(); final Args args = new Args(serverTransport).processor(processor); server = new TSimpleServer(args); server.serve(); - } catch (TTransportException e) { + } catch (Exception e) { e.printStackTrace(); assert false; } @@ -64,7 +68,7 @@ public class TestTSSLTransportFactory extends ServerTestBase { }; serverThread.start(); - Thread.sleep(1000); + Thread.sleep(SLEEP_DELAY); } @Override diff --git a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactoryStreamedStore.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactoryStreamedStore.java new file mode 100644 index 000000000..25bf5cebb --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSSLTransportFactoryStreamedStore.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.thrift.transport; + +import java.io.FileInputStream; +import java.net.InetAddress; + +public class TestTSSLTransportFactoryStreamedStore extends TestTSSLTransportFactory { + private static String keyStoreLocation = System.getProperty("javax.net.ssl.keyStore"); + private static String trustStoreLocation = System.getProperty("javax.net.ssl.trustStore"); + + public TestTSSLTransportFactoryStreamedStore() { + super(); + + /** + * Override system properties to be able to test passing + * the trustStore and keyStore as input stream + */ + System.setProperty("javax.net.ssl.trustStore", ""); + System.setProperty("javax.net.ssl.keyStore", ""); + } + + @Override + public TTransport getClientTransport(TTransport underlyingTransport) + throws Exception { + TSSLTransportFactory.TSSLTransportParameters params = new + TSSLTransportFactory.TSSLTransportParameters(); + + params.setTrustStore(new FileInputStream(trustStoreLocation), + System.getProperty("javax.net.ssl.trustStorePassword")); + + return TSSLTransportFactory.getClientSocket(HOST, PORT, 0/*timeout*/, params); + } + + @Override + protected TServerSocket getServerTransport() throws Exception { + TSSLTransportFactory.TSSLTransportParameters params = new + TSSLTransportFactory.TSSLTransportParameters(); + + params.setKeyStore(new FileInputStream(keyStoreLocation), + System.getProperty("javax.net.ssl.keyStorePassword")); + + return TSSLTransportFactory.getServerSocket(PORT, 0/*timeout*/, InetAddress.getByName(HOST), params); + } +} \ No newline at end of file diff --git 
a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSimpleFileTransport.java b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSimpleFileTransport.java index 5d2fb45f8..7b880f499 100644 --- a/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSimpleFileTransport.java +++ b/vendor/git.apache.org/thrift.git/lib/java/test/org/apache/thrift/transport/TestTSimpleFileTransport.java @@ -27,21 +27,21 @@ public class TestTSimpleFileTransport extends TestCase { public void testFresh() throws Exception { //Test write side Path tempFilePathName = Files.createTempFile("TSimpleFileTransportTest", null); - Files.delete(tempFilePathName); + Files.delete(tempFilePathName); byte[] input_buf = new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; TSimpleFileTransport trans_write = new TSimpleFileTransport(tempFilePathName.toString(),false, true, false); assert (!trans_write.isOpen()); trans_write.open(); assert(trans_write.isOpen()); trans_write.write(input_buf); - trans_write.write(input_buf,2,2); + trans_write.write(input_buf,2,2); trans_write.flush(); trans_write.close(); - + //Test read side TSimpleFileTransport trans = new TSimpleFileTransport(tempFilePathName.toString(),true, false); - assert(trans_write.isOpen()); - + assert(trans.isOpen()); + //Simple file trans provides no buffer access assert(0 == trans.getBufferPosition()); assert(null == trans.getBuffer()); @@ -56,19 +56,19 @@ public class TestTSimpleFileTransport extends TestCase { trans.readAll(buf1, 0, BUFSIZ); assert(BUFSIZ == trans.getFilePointer()); assert(Arrays.equals(new byte[]{1, 2, 3, 4}, buf1)); - + int bytesRead = trans.read(buf1, 0, BUFSIZ); assert(bytesRead > 0); for (int i = 0; i < bytesRead; ++i) { - assert(buf1[i] == i+5); + assert(buf1[i] == i+5); } - + trans.seek(0); assert(0 == trans.getFilePointer()); trans.readAll(buf1, 0, BUFSIZ); assert(Arrays.equals(new byte[]{1, 2, 3, 4}, buf1)); assert(BUFSIZ == 
trans.getFilePointer()); trans.close(); - Files.delete(tempFilePathName); + Files.delete(tempFilePathName); } } diff --git a/vendor/git.apache.org/thrift.git/lib/js/Gruntfile.js b/vendor/git.apache.org/thrift.git/lib/js/Gruntfile.js index bff250fa4..1dcead650 100644 --- a/vendor/git.apache.org/thrift.git/lib/js/Gruntfile.js +++ b/vendor/git.apache.org/thrift.git/lib/js/Gruntfile.js @@ -38,11 +38,14 @@ module.exports = function(grunt) { }, shell: { InstallThriftJS: { - command: 'mkdir test/build; mkdir test/build/js; cp src/thrift.js test/build/js/thrift.js' + command: 'mkdir test/build; mkdir test/build/js; mkdir test/build/js/lib; cp src/thrift.js test/build/js/thrift.js' }, InstallThriftNodeJSDep: { command: 'cd ../..; npm install' }, + InstallTestLibs: { + command: 'cd test; ant download_jslibs' + }, ThriftGen: { command: '../../compiler/cpp/thrift -gen js -gen js:node -o test ../../test/ThriftTest.thrift' }, @@ -51,35 +54,33 @@ module.exports = function(grunt) { }, ThriftGenDeepConstructor: { command: '../../compiler/cpp/thrift -gen js -o test ../../test/JsDeepConstructorTest.thrift' - } - }, - external_daemon: { + }, + ThriftGenDoubleConstants: { + command: '../../compiler/cpp/thrift -gen js -o test ../../test/DoubleConstantsTest.thrift' + }, + ThriftGenES6: { + command: '../../compiler/cpp/thrift -gen js -gen js:es6 -o test ../../test/ThriftTest.thrift' + }, ThriftTestServer: { options: { - startCheck: function(stdout, stderr) { - return (/Thrift Server running on port/).test(stdout); - }, - nodeSpawnOptions: { - cwd: "test", - env: {NODE_PATH: "../../nodejs/lib:../../../node_modules"} - } + async: true, + execOptions: { + cwd: "./test", + env: {NODE_PATH: "../../nodejs/lib:../../../node_modules"} + } }, - cmd: "node", - args: ["server_http.js"] + command: "node server_http.js", }, ThriftTestServer_TLS: { options: { - startCheck: function(stdout, stderr) { - return (/Thrift Server running on port/).test(stdout); - }, - nodeSpawnOptions: { - cwd: "test", - 
env: {NODE_PATH: "../../nodejs/lib:../../../node_modules"} - } + async: true, + execOptions: { + cwd: "./test", + env: {NODE_PATH: "../../nodejs/lib:../../../node_modules"} + } }, - cmd: "node", - args: ["server_https.js"] - } + command: "node server_https.js", + }, }, qunit: { ThriftJS: { @@ -96,6 +97,14 @@ module.exports = function(grunt) { ] } }, + ThriftJS_DoubleRendering: { + options: { + '--ignore-ssl-errors': true, + urls: [ + 'http://localhost:8088/test-double-rendering.html' + ] + } + }, ThriftWS: { options: { urls: [ @@ -133,6 +142,13 @@ module.exports = function(grunt) { 'http://localhost:8088/test-deep-constructor.html' ] } + }, + ThriftWSES6: { + options: { + urls: [ + 'http://localhost:8088/test-es6.html' + ] + } } }, jshint: { @@ -154,19 +170,25 @@ module.exports = function(grunt) { grunt.loadNpmTasks('grunt-contrib-qunit'); grunt.loadNpmTasks('grunt-contrib-concat'); grunt.loadNpmTasks('grunt-jsdoc'); - grunt.loadNpmTasks('grunt-external-daemon'); - grunt.loadNpmTasks('grunt-shell'); + grunt.loadNpmTasks('grunt-shell-spawn'); + + grunt.registerTask('wait', 'Wait just one second for server to start', function () { + var done = this.async(); + setTimeout(function() { + done(true); + }, 1000); + }); grunt.registerTask('test', ['jshint', 'shell:InstallThriftJS', 'shell:InstallThriftNodeJSDep', 'shell:ThriftGen', - 'external_daemon:ThriftTestServer', 'external_daemon:ThriftTestServer_TLS', + 'shell:InstallTestLibs', + 'shell:ThriftTestServer', 'shell:ThriftTestServer_TLS', + 'wait', 'shell:ThriftGenDeepConstructor', 'qunit:ThriftDeepConstructor', 'qunit:ThriftJS', 'qunit:ThriftJS_TLS', - 'shell:ThriftGenJQ', 'qunit:ThriftJSJQ', 'qunit:ThriftJSJQ_TLS' + 'qunit:ThriftWS', + 'shell:ThriftGenJQ', 'qunit:ThriftJSJQ', 'qunit:ThriftJSJQ_TLS', + 'shell:ThriftGenES6', 'qunit:ThriftWSES6', + 'shell:ThriftTestServer:kill', 'shell:ThriftTestServer_TLS:kill', ]); - grunt.registerTask('default', ['jshint', 'shell:InstallThriftJS', 'shell:InstallThriftNodeJSDep', 
'shell:ThriftGen', - 'external_daemon:ThriftTestServer', 'external_daemon:ThriftTestServer_TLS', - 'qunit:ThriftJS', 'qunit:ThriftJS_TLS', - 'shell:ThriftGenJQ', 'qunit:ThriftJSJQ', 'qunit:ThriftJSJQ_TLS', - 'concat', 'uglify', 'jsdoc' - ]); + grunt.registerTask('default', ['test', 'concat', 'uglify', 'jsdoc']); }; diff --git a/vendor/git.apache.org/thrift.git/lib/js/package-lock.json b/vendor/git.apache.org/thrift.git/lib/js/package-lock.json new file mode 100644 index 000000000..02347cfa5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/js/package-lock.json @@ -0,0 +1,2285 @@ +{ + "name": "thrift", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha1-+PLIh60Qv2f2NPAFtph/7TF5qsg=" + }, + "ajv": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", + "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=", + "dev": true, + "requires": { + "co": "4.6.0", + "fast-deep-equal": "1.1.0", + "fast-json-stable-stringify": "2.0.0", + "json-schema-traverse": "0.3.1" + } + }, + "align-text": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/align-text/-/align-text-0.1.4.tgz", + "integrity": "sha1-DNkKVhCT810KmSVsIrcGlDP60Rc=", + "dev": true, + "requires": { + "kind-of": "3.2.2", + "longest": "1.0.1", + "repeat-string": "1.6.1" + } + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": 
"sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "1.0.3" + }, + "dependencies": { + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + } + } + }, + "array-find-index": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", + "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", + "dev": true + }, + "asn1": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.3.tgz", + "integrity": "sha1-2sh4dxPJlmhJ/IGAd36+nB3fO4Y=", + "dev": true + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "dev": true + }, + "async": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/async/-/async-1.5.2.tgz", + "integrity": "sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=", + "dev": true + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", + "dev": true + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "dev": true + }, + "aws4": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.6.0.tgz", + "integrity": "sha1-g+9cqGCysy5KDe7e6MdxudtXRx4=", + "dev": true + }, + "babylon": { + "version": "7.0.0-beta.19", + "resolved": "https://registry.npmjs.org/babylon/-/babylon-7.0.0-beta.19.tgz", + "integrity": "sha1-6SjH6AfpcOBTaweKs+DEj54FJQM=" + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": 
"sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz", + "integrity": "sha1-Y7xdy2EzG5K8Bf1SiVPDNGKgb40=", + "dev": true, + "optional": true, + "requires": { + "tweetnacl": "0.14.5" + } + }, + "bluebird": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz", + "integrity": "sha1-2VUfnemPH82h5oPRfukaBgLuLrk=" + }, + "boom": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/boom/-/boom-4.3.1.tgz", + "integrity": "sha1-T4owBctKfjiJ90kDD9JbluAdLjE=", + "dev": true, + "requires": { + "hoek": "4.2.1" + } + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "1.0.0", + "concat-map": "0.0.1" + } + }, + "browserify-zlib": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.1.4.tgz", + "integrity": "sha1-uzX4pRn2AOD6a4SFJByXnQFB+y0=", + "dev": true, + "requires": { + "pako": "0.2.9" + } + }, + "buffer-shims": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-shims/-/buffer-shims-1.0.0.tgz", + "integrity": "sha1-mXjOMXOIxkmth5MCjDR37wRKi1E=", + "dev": true + }, + "builtin-modules": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz", + "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=", + "dev": true + }, + "camelcase": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", + "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", + "dev": true + }, + "camelcase-keys": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-2.1.0.tgz", + 
"integrity": "sha1-MIvur/3ygRkFHvodkyITyRuPkuc=", + "dev": true, + "requires": { + "camelcase": "2.1.1", + "map-obj": "1.0.1" + } + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", + "dev": true + }, + "catharsis": { + "version": "0.8.9", + "resolved": "https://registry.npmjs.org/catharsis/-/catharsis-0.8.9.tgz", + "integrity": "sha1-mMyJDKZS3S7w5ws3klMQ/56Q/Is=", + "requires": { + "underscore-contrib": "0.3.0" + } + }, + "center-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz", + "integrity": "sha1-qg0yYptu6XIgBBHL1EYckHvCt60=", + "dev": true, + "requires": { + "align-text": "0.1.4", + "lazy-cache": "1.0.4" + } + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "requires": { + "ansi-styles": "2.2.1", + "escape-string-regexp": "1.0.5", + "has-ansi": "2.0.0", + "strip-ansi": "3.0.1", + "supports-color": "2.0.0" + } + }, + "cli": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/cli/-/cli-1.0.1.tgz", + "integrity": "sha1-IoF1NPJL+klQw01TLUjsvGIbjBQ=", + "dev": true, + "requires": { + "exit": "0.1.2", + "glob": "7.1.2" + }, + "dependencies": { + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha1-wZyd+aAocC1nhhI4SmVSQExjbRU=", + "dev": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } + } + } + }, + "cliui": { + "version": "2.1.0", 
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-2.1.0.tgz", + "integrity": "sha1-S0dXYP+AJkx2LDoXGQMukcf+oNE=", + "dev": true, + "requires": { + "center-align": "0.1.3", + "right-align": "0.1.3", + "wordwrap": "0.0.2" + } + }, + "co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=", + "dev": true + }, + "coffeescript": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/coffeescript/-/coffeescript-1.10.0.tgz", + "integrity": "sha1-56qDAZF+9iGzXYo580jc3R234z4=", + "dev": true + }, + "colors": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.1.2.tgz", + "integrity": "sha1-FopHAXVran9RoSzgyXv6KMCE7WM=", + "dev": true + }, + "combined-stream": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz", + "integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=", + "dev": true, + "requires": { + "delayed-stream": "1.0.0" + } + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.0.tgz", + "integrity": "sha1-CqxmL9Ur54lk1VMvaUeE5wEQrPc=", + "dev": true, + "requires": { + "inherits": "2.0.3", + "readable-stream": "2.3.5", + "typedarray": "0.0.6" + }, + "dependencies": { + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "readable-stream": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.5.tgz", + "integrity": "sha512-tK0yDhrkygt/knjowCUiWP9YdV7c5R+8cR0r/kt9ZhBU906Fs6RpQJCEilamRJj1Nx2rWI6LkW9gKqjTkshhEw==", + "dev": true, + "requires": { + 
"core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "2.0.0", + "safe-buffer": "5.1.1", + "string_decoder": "1.0.3", + "util-deprecate": "1.0.2" + } + }, + "string_decoder": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.0.3.tgz", + "integrity": "sha1-D8Z9fBQYJd6UKC3VNr7GubzoYKs=", + "dev": true, + "requires": { + "safe-buffer": "5.1.1" + } + } + } + }, + "console-browserify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.1.0.tgz", + "integrity": "sha1-8CQcRXMKn8YyOyBtvzjtx0HQuxA=", + "dev": true, + "requires": { + "date-now": "0.1.4" + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cross-spawn": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-3.0.1.tgz", + "integrity": "sha1-ElYDfsufDF9549bvE14wdwGEuYI=", + "dev": true, + "requires": { + "lru-cache": "4.1.1", + "which": "1.3.0" + }, + "dependencies": { + "lru-cache": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.1.tgz", + "integrity": "sha1-Yi4y6CSItJJ5EUpPns9F581rulU=", + "dev": true, + "requires": { + "pseudomap": "1.0.2", + "yallist": "2.1.2" + } + }, + "which": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.0.tgz", + "integrity": "sha1-/wS9/AEO5UfXgL7DjhrBwnd9JTo=", + "dev": true, + "requires": { + "isexe": "2.0.0" + } + } + } + }, + "cryptiles": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-3.1.2.tgz", + "integrity": "sha1-qJ+7Ig9c4l7FboxKqKT9e1sNKf4=", + "dev": true, + "requires": { + "boom": "5.2.0" + }, + "dependencies": { + "boom": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/boom/-/boom-5.2.0.tgz", + 
"integrity": "sha1-XdnabuOl8wIHdDYpDLcX0/SlTgI=", + "dev": true, + "requires": { + "hoek": "4.2.1" + } + } + } + }, + "currently-unhandled": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", + "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", + "dev": true, + "requires": { + "array-find-index": "1.0.2" + } + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "dev": true, + "requires": { + "assert-plus": "1.0.0" + } + }, + "date-now": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/date-now/-/date-now-0.1.4.tgz", + "integrity": "sha1-6vQ5/U1ISK105cx9vvIAZyueNFs=", + "dev": true + }, + "dateformat": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-1.0.12.tgz", + "integrity": "sha1-nxJLZ1lMk3/3BpMuSmQsyo27/uk=", + "dev": true, + "requires": { + "get-stdin": "4.0.1", + "meow": "3.7.0" + } + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha1-XRKFFd8TT/Mn6QpMk/Tgd6U2NB8=", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "dev": true + }, + "dom-serializer": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.1.0.tgz", + "integrity": "sha1-BzxpdUbOB4DOI75KKOKT5AvDDII=", + "dev": true, + "requires": { + "domelementtype": "1.1.3", + "entities": "1.1.1" + }, + "dependencies": { + "domelementtype": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/domelementtype/-/domelementtype-1.1.3.tgz", + "integrity": "sha1-vSh3PiZCiBrsUVRJJCmcXNgiGFs=", + "dev": true + }, + "entities": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.1.tgz", + "integrity": "sha1-blwtClYhtdra7O+AuQ7ftc13cvA=", + "dev": true + } + } + }, + "domelementtype": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.0.tgz", + "integrity": "sha1-sXrtguirWeUt2cGbF1bg/BhyBMI=", + "dev": true + }, + "domhandler": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.3.0.tgz", + "integrity": "sha1-LeWaCCLVAn+r/28DLCsloqir5zg=", + "dev": true, + "requires": { + "domelementtype": "1.3.0" + } + }, + "domutils": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", + "integrity": "sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8=", + "dev": true, + "requires": { + "dom-serializer": "0.1.0", + "domelementtype": "1.3.0" + } + }, + "ecc-jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz", + "integrity": "sha1-D8c6ntXw1Tw4GTOYUj735UN3dQU=", + "dev": true, + "optional": true, + "requires": { + "jsbn": "0.1.1" + } + }, + "entities": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-1.0.0.tgz", + "integrity": "sha1-sph6o4ITR/zeZCsk/fyeT7cSvyY=", + "dev": true + }, + "error-ex": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.1.tgz", + "integrity": "sha1-+FWobOYa3E6GIcPNoh56dhLDqNw=", + "dev": true, + "requires": { + "is-arrayish": "0.2.1" + } + }, + "es6-promise": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.4.tgz", + "integrity": "sha512-/NdNZVJg+uZgtm9eS3O6lrOLYmQag2DjdEXuPaHlZ6RuVqgqaVZfgYCepEIKsLqwdQArOPtC3XzRLqGGfT8KQQ==", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + 
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" + }, + "esprima": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-2.7.3.tgz", + "integrity": "sha1-luO3DVd59q1JzQMmc9HDEnZ7pYE=", + "dev": true + }, + "eventemitter2": { + "version": "0.4.14", + "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz", + "integrity": "sha1-j2G3XN4BKy6esoTUVFWDtWQ7Yas=", + "dev": true + }, + "exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=", + "dev": true + }, + "extend": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.1.tgz", + "integrity": "sha1-p1Xqe8Gt/MWjHOfnYtuq3F5jZEQ=", + "dev": true + }, + "extract-zip": { + "version": "1.6.6", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.6.6.tgz", + "integrity": "sha1-EpDt6NINCHK0Kf0/NRyhKOxe+Fw=", + "dev": true, + "requires": { + "concat-stream": "1.6.0", + "debug": "2.6.9", + "mkdirp": "0.5.0", + "yauzl": "2.4.1" + }, + "dependencies": { + "mkdirp": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", + "integrity": "sha1-HXMHam35hs2TROFecfzAWkyavxI=", + "dev": true, + "requires": { + "minimist": "0.0.8" + } + } + } + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", + "dev": true + }, + "fast-deep-equal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz", + "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "integrity": 
"sha1-1RQsDK7msRifh9OnYREGT4bIu/I=", + "dev": true + }, + "fd-slicer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.0.1.tgz", + "integrity": "sha1-i1vL2ewyfFBBv5qwI/1nUPEXfmU=", + "dev": true, + "requires": { + "pend": "1.2.0" + } + }, + "figures": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", + "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", + "dev": true, + "requires": { + "escape-string-regexp": "1.0.5", + "object-assign": "4.1.1" + } + }, + "find-up": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-1.1.2.tgz", + "integrity": "sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8=", + "dev": true, + "requires": { + "path-exists": "2.1.0", + "pinkie-promise": "2.0.1" + } + }, + "findup-sync": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-0.3.0.tgz", + "integrity": "sha1-N5MKpdgWt3fANEXhlmzGeQpMCxY=", + "dev": true, + "requires": { + "glob": "5.0.15" + }, + "dependencies": { + "glob": { + "version": "5.0.15", + "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, + "requires": { + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + } + } + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", + "dev": true + }, + "form-data": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz", + "integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=", + "dev": true, + "requires": { + "asynckit": "0.4.0", + "combined-stream": "1.0.6", + "mime-types": "2.1.18" + } + }, + "fs-extra": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-1.0.0.tgz", + "integrity": 
"sha1-zTzl9+fLYUWIP8rjGR6Yd/hYeVA=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "jsonfile": "2.4.0", + "klaw": "1.3.1" + }, + "dependencies": { + "klaw": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/klaw/-/klaw-1.3.1.tgz", + "integrity": "sha1-QIhDO0azsbolnXh4XY6W9zugJDk=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + } + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "get-stdin": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-4.0.1.tgz", + "integrity": "sha1-uWjGsKBDhDJJAui/Gl3zJXmkUP4=", + "dev": true + }, + "getobject": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/getobject/-/getobject-0.1.0.tgz", + "integrity": "sha1-BHpEl4n6Fg0Bj1SG7ZEyC27HiFw=", + "dev": true + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "dev": true, + "requires": { + "assert-plus": "1.0.0" + } + }, + "glob": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.0.6.tgz", + "integrity": "sha1-IRuvr0nlJbjNkyYNFKsTYVKz9Xo=", + "dev": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "graceful-fs": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", + "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=" + }, + "grunt": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/grunt/-/grunt-1.0.2.tgz", + "integrity": "sha1-TmpeaVtwRy/VME9fqeNCNoNqc7w=", + "dev": true, + "requires": { + "coffeescript": "1.10.0", + "dateformat": "1.0.12", + "eventemitter2": "0.4.14", + "exit": "0.1.2", + "findup-sync": 
"0.3.0", + "glob": "7.0.6", + "grunt-cli": "1.2.0", + "grunt-known-options": "1.1.0", + "grunt-legacy-log": "1.0.1", + "grunt-legacy-util": "1.0.0", + "iconv-lite": "0.4.19", + "js-yaml": "3.5.5", + "minimatch": "3.0.4", + "nopt": "3.0.6", + "path-is-absolute": "1.0.1", + "rimraf": "2.2.8" + }, + "dependencies": { + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1.1.1" + } + } + } + }, + "grunt-cli": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/grunt-cli/-/grunt-cli-1.2.0.tgz", + "integrity": "sha1-VisRnrsGndtGSs4oRVAb6Xs1tqg=", + "dev": true, + "requires": { + "findup-sync": "0.3.0", + "grunt-known-options": "1.1.0", + "nopt": "3.0.6", + "resolve": "1.1.7" + }, + "dependencies": { + "findup-sync": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-0.3.0.tgz", + "integrity": "sha1-N5MKpdgWt3fANEXhlmzGeQpMCxY=", + "dev": true, + "requires": { + "glob": "5.0.15" + } + }, + "glob": { + "version": "5.0.15", + "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, + "requires": { + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } + }, + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1.1.1" + } + } + } + }, + "grunt-contrib-concat": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/grunt-contrib-concat/-/grunt-contrib-concat-1.0.1.tgz", + "integrity": "sha1-YVCYYwhOhx1+ht5IwBUlntl3Rb0=", + "dev": true, + "requires": { + "chalk": "1.1.3", + "source-map": "0.5.7" + } + }, + "grunt-contrib-jshint": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/grunt-contrib-jshint/-/grunt-contrib-jshint-1.1.0.tgz", + "integrity": "sha1-Np2QmyWTxA6L55lAshNAhQx5Oaw=", + "dev": true, + "requires": { + "chalk": "1.1.3", + "hooker": "0.2.3", + "jshint": "2.9.5" + } + }, + "grunt-contrib-qunit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/grunt-contrib-qunit/-/grunt-contrib-qunit-1.3.0.tgz", + "integrity": "sha1-naxijP1OyBWZhjPbc7Ur2z3byZ4=", + "dev": true, + "requires": { + "grunt-lib-phantomjs": "1.1.0" + } + }, + "grunt-contrib-uglify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/grunt-contrib-uglify/-/grunt-contrib-uglify-1.0.2.tgz", + "integrity": "sha1-rmekb5FT7dTLEYE6Vetpxw19svs=", + "dev": true, + "requires": { + "chalk": "1.1.3", + "lodash": "4.17.5", + "maxmin": "1.1.0", + "uglify-js": "2.6.4", + "uri-path": "1.0.0" + }, + "dependencies": { + "lodash": { + "version": "4.17.5", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.5.tgz", + "integrity": "sha512-svL3uiZf1RwhH+cWrfZn3A4+U58wbP0tGVTLQPbjplZxZ8ROD9VLuNgsRniTlLe7OlSqR79RUehXgpBW/s0IQw==", + "dev": true + } + } + }, + "grunt-jsdoc": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/grunt-jsdoc/-/grunt-jsdoc-2.2.1.tgz", + "integrity": "sha512-33QZYBYjv2Ph3H2ygqXHn/o0ttfptw1f9QciOTgvzhzUeiPrnvzMNUApTPtw22T6zgReE5FZ1RR58U2wnK/l+w==", + "dev": true, + "requires": { + "cross-spawn": "3.0.1", + "jsdoc": "3.5.5", + "marked": "0.3.17" + } + }, + "grunt-known-options": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/grunt-known-options/-/grunt-known-options-1.1.0.tgz", + "integrity": "sha1-pCdO6zL6dl2lp6OxcSYXzjsUQUk=", + "dev": true + }, + "grunt-legacy-log": { + "version": 
"1.0.1", + "resolved": "https://registry.npmjs.org/grunt-legacy-log/-/grunt-legacy-log-1.0.1.tgz", + "integrity": "sha512-rwuyqNKlI0IPz0DvxzJjcEiQEBaBNVeb1LFoZKxSmHLETFUwhwUrqOsPIxURTKSwNZHZ4ht1YLBYmVU0YZAzHQ==", + "dev": true, + "requires": { + "colors": "1.1.2", + "grunt-legacy-log-utils": "1.0.0", + "hooker": "0.2.3", + "lodash": "4.17.5", + "underscore.string": "3.3.4" + } + }, + "grunt-legacy-log-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/grunt-legacy-log-utils/-/grunt-legacy-log-utils-1.0.0.tgz", + "integrity": "sha1-p7ji0Ps1taUPSvmG/BEnSevJbz0=", + "dev": true, + "requires": { + "chalk": "1.1.3", + "lodash": "4.3.0" + }, + "dependencies": { + "lodash": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.3.0.tgz", + "integrity": "sha1-79nEpuxT87BUEkKZFcPkgk5NJaQ=", + "dev": true + } + } + }, + "grunt-legacy-util": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/grunt-legacy-util/-/grunt-legacy-util-1.0.0.tgz", + "integrity": "sha1-OGqnjcbtUJhsKxiVcmWxtIq7m4Y=", + "dev": true, + "requires": { + "async": "1.5.2", + "exit": "0.1.2", + "getobject": "0.1.0", + "hooker": "0.2.3", + "lodash": "4.3.0", + "underscore.string": "3.2.3", + "which": "1.2.14" + }, + "dependencies": { + "lodash": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.3.0.tgz", + "integrity": "sha1-79nEpuxT87BUEkKZFcPkgk5NJaQ=", + "dev": true + }, + "underscore.string": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-3.2.3.tgz", + "integrity": "sha1-gGmSYzZl1eX8tNsfs6hi62jp5to=", + "dev": true + } + } + }, + "grunt-lib-phantomjs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/grunt-lib-phantomjs/-/grunt-lib-phantomjs-1.1.0.tgz", + "integrity": "sha1-np7c3Z/S3UDgwYHJQ3HVcqpe6tI=", + "dev": true, + "requires": { + "eventemitter2": "0.4.14", + "phantomjs-prebuilt": "2.1.16", + "rimraf": "2.6.2", + "semver": 
"5.5.0", + "temporary": "0.0.8" + }, + "dependencies": { + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha1-wZyd+aAocC1nhhI4SmVSQExjbRU=", + "dev": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } + }, + "rimraf": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", + "integrity": "sha1-LtgVDSShbqhlHm1u8PR8QVjOejY=", + "dev": true, + "requires": { + "glob": "7.1.2" + } + } + } + }, + "grunt-shell-spawn": { + "version": "0.3.10", + "resolved": "https://registry.npmjs.org/grunt-shell-spawn/-/grunt-shell-spawn-0.3.10.tgz", + "integrity": "sha1-gbuNRX7EfTGCqH1jCO+EXd+5SI8=", + "dev": true, + "requires": { + "grunt": "1.0.2", + "sync-exec": "0.6.2" + } + }, + "gzip-size": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-1.0.0.tgz", + "integrity": "sha1-Zs+LEBBHInuVus5uodoMF37Vwi8=", + "dev": true, + "requires": { + "browserify-zlib": "0.1.4", + "concat-stream": "1.6.0" + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", + "dev": true + }, + "har-validator": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.0.3.tgz", + "integrity": "sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0=", + "dev": true, + "requires": { + "ajv": "5.5.2", + "har-schema": "2.0.0" + } + }, + "has-ansi": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": 
"sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "dev": true, + "requires": { + "ansi-regex": "2.1.1" + } + }, + "hasha": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-2.2.0.tgz", + "integrity": "sha1-eNfL/B5tZjA/55g3NlmEUXsvbuE=", + "dev": true, + "requires": { + "is-stream": "1.1.0", + "pinkie-promise": "2.0.1" + } + }, + "hawk": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/hawk/-/hawk-6.0.2.tgz", + "integrity": "sha1-r02RTrBl+bXOTZ0RwcshJu7MMDg=", + "dev": true, + "requires": { + "boom": "4.3.1", + "cryptiles": "3.1.2", + "hoek": "4.2.1", + "sntp": "2.1.0" + } + }, + "hoek": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/hoek/-/hoek-4.2.1.tgz", + "integrity": "sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA==", + "dev": true + }, + "hooker": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/hooker/-/hooker-0.2.3.tgz", + "integrity": "sha1-uDT3I8xKJCqmWWNFnfbZhMXT2Vk=", + "dev": true + }, + "hosted-git-info": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.5.0.tgz", + "integrity": "sha1-bWDjSzq7yDEwYsO3mO+NkBoHrzw=", + "dev": true + }, + "htmlparser2": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.8.3.tgz", + "integrity": "sha1-mWwosZFRaovoZQGn15dX5ccMEGg=", + "dev": true, + "requires": { + "domelementtype": "1.3.0", + "domhandler": "2.3.0", + "domutils": "1.5.1", + "entities": "1.0.0", + "readable-stream": "1.1.14" + } + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "jsprim": "1.4.1", + "sshpk": "1.13.1" + } + }, + "iconv-lite": { + "version": "0.4.19", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz", + 
"integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==", + "dev": true + }, + "indent-string": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-2.1.0.tgz", + "integrity": "sha1-ji1INIdCEhtKghi3oTfppSBJ3IA=", + "dev": true, + "requires": { + "repeating": "2.0.1" + } + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "1.4.0", + "wrappy": "1.0.2" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", + "dev": true + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha1-76ouqdqg16suoTqXsritUf776L4=", + "dev": true + }, + "is-builtin-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-builtin-module/-/is-builtin-module-1.0.0.tgz", + "integrity": "sha1-VAVy0096wxGfj3bDDLwbHgN6/74=", + "dev": true, + "requires": { + "builtin-modules": "1.1.1" + } + }, + "is-finite": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.0.2.tgz", + "integrity": "sha1-zGZ3aVYCvlUO8R6LSqYwU0K20Ko=", + "dev": true, + "requires": { + "number-is-nan": "1.0.1" + } + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": 
"sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "is-utf8": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=", + "dev": true + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", + "dev": true + }, + "js-yaml": { + "version": "3.5.5", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.5.5.tgz", + "integrity": "sha1-A3fDgBfKvHMisNH7zSWkkWQfL74=", + "dev": true, + "requires": { + "argparse": "1.0.10", + "esprima": "2.7.3" + } + }, + "js2xmlparser": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-3.0.0.tgz", + "integrity": "sha1-P7YOqgicVED5MZ9RdgzNB+JJlzM=", + "requires": { + "xmlcreate": "1.0.2" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", + "dev": true, + "optional": true + }, + "jsdoc": { + "version": "3.5.5", + "resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-3.5.5.tgz", + "integrity": "sha1-SEUhsSboGQTWMv+D7JqqCWcI+k0=", + "requires": { + "babylon": "7.0.0-beta.19", + "bluebird": "3.5.1", + "catharsis": "0.8.9", + "escape-string-regexp": "1.0.5", + "js2xmlparser": "3.0.0", + "klaw": "2.0.0", + "marked": "0.3.17", + "mkdirp": "0.5.1", + "requizzle": "0.2.1", + "strip-json-comments": "2.0.1", + "taffydb": "2.6.2", + "underscore": "1.8.3" + } + }, + "jshint": { + "version": "2.9.5", + "resolved": 
"https://registry.npmjs.org/jshint/-/jshint-2.9.5.tgz", + "integrity": "sha1-HnJSkVzmgbQIJ+4UJIxG006apiw=", + "dev": true, + "requires": { + "cli": "1.0.1", + "console-browserify": "1.1.0", + "exit": "0.1.2", + "htmlparser2": "3.8.3", + "lodash": "3.7.0", + "minimatch": "3.0.4", + "shelljs": "0.3.0", + "strip-json-comments": "1.0.4" + }, + "dependencies": { + "lodash": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-3.7.0.tgz", + "integrity": "sha1-Nni9irmVBXwHreg27S7wh9qBHUU=", + "dev": true + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha1-UWbihkV/AzBgZL5Ul+jbsMPTIIM=", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } + }, + "strip-json-comments": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-1.0.4.tgz", + "integrity": "sha1-HhX7ysl9Pumb8tc7TGVrCCu6+5E=", + "dev": true + } + } + }, + "jslint": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/jslint/-/jslint-0.12.0.tgz", + "integrity": "sha512-RoCsyICcKA+6TFsbys9DpKTfPVaC71Mm5QSjvrWA0lDVN+LIvx6apa42FFisMqmCTvJ8DxkcoQGJ0j7m3kTVow==", + "dev": true, + "requires": { + "exit": "0.1.2", + "glob": "7.1.2", + "nopt": "3.0.6", + "readable-stream": "2.1.5" + }, + "dependencies": { + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dev": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "nopt": { + "version": "3.0.6", + "resolved": 
"https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1.1.1" + } + }, + "process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=", + "dev": true + }, + "readable-stream": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.1.5.tgz", + "integrity": "sha1-ZvqLcg4UOLNkaB8q0aY8YYRIydA=", + "dev": true, + "requires": { + "buffer-shims": "1.0.0", + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "1.0.7", + "string_decoder": "0.10.31", + "util-deprecate": "1.0.2" + } + } + } + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", + "dev": true + }, + "json-schema-traverse": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz", + "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=", + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "jsonfile": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz", + "integrity": "sha1-NzaitCi4e72gzIO1P6PWM6NcKug=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "kew": { + "version": "0.7.0", + 
"resolved": "https://registry.npmjs.org/kew/-/kew-0.7.0.tgz", + "integrity": "sha1-edk9LTM2PW/dKXCzNdkUGtWR15s=", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "1.1.6" + } + }, + "klaw": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/klaw/-/klaw-2.0.0.tgz", + "integrity": "sha1-WcEo4Nxc5BAgEVEZTuucv4WGUPY=", + "requires": { + "graceful-fs": "4.1.11" + } + }, + "lazy-cache": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz", + "integrity": "sha1-odePw6UEdMuAhF07O24dpJpEbo4=", + "dev": true + }, + "load-json-file": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", + "integrity": "sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "parse-json": "2.2.0", + "pify": "2.3.0", + "pinkie-promise": "2.0.1", + "strip-bom": "2.0.0" + } + }, + "lodash": { + "version": "4.17.5", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.5.tgz", + "integrity": "sha512-svL3uiZf1RwhH+cWrfZn3A4+U58wbP0tGVTLQPbjplZxZ8ROD9VLuNgsRniTlLe7OlSqR79RUehXgpBW/s0IQw==", + "dev": true + }, + "longest": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", + "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc=", + "dev": true + }, + "loud-rejection": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/loud-rejection/-/loud-rejection-1.6.0.tgz", + "integrity": "sha1-W0b4AUft7leIcPCG0Eghz5mOVR8=", + "dev": true, + "requires": { + "currently-unhandled": "0.4.1", + "signal-exit": "3.0.2" + } + }, + "map-obj": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", + "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", + "dev": true + }, + "marked": { + 
"version": "0.3.17", + "resolved": "https://registry.npmjs.org/marked/-/marked-0.3.17.tgz", + "integrity": "sha512-+AKbNsjZl6jFfLPwHhWmGTqE009wTKn3RTmn9K8oUKHrX/abPJjtcRtXpYB/FFrwPJRUA86LX/de3T0knkPCmQ==" + }, + "maxmin": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/maxmin/-/maxmin-1.1.0.tgz", + "integrity": "sha1-cTZehKmd2Piz99X94vANHn9zvmE=", + "dev": true, + "requires": { + "chalk": "1.1.3", + "figures": "1.7.0", + "gzip-size": "1.0.0", + "pretty-bytes": "1.0.4" + } + }, + "meow": { + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/meow/-/meow-3.7.0.tgz", + "integrity": "sha1-cstmi0JSKCkKu/qFaJJYcwioAfs=", + "dev": true, + "requires": { + "camelcase-keys": "2.1.0", + "decamelize": "1.2.0", + "loud-rejection": "1.6.0", + "map-obj": "1.0.1", + "minimist": "1.2.0", + "normalize-package-data": "2.4.0", + "object-assign": "4.1.1", + "read-pkg-up": "1.0.1", + "redent": "1.0.0", + "trim-newlines": "1.0.0" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + } + } + }, + "mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "dev": true + }, + "mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dev": true, + "requires": { + "mime-db": "1.33.0" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } 
+ }, + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "requires": { + "minimist": "0.0.8" + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "nopt": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.1.tgz", + "integrity": "sha1-0NRoWv1UFRk8jHUFYC0NF81kR00=", + "requires": { + "abbrev": "1.1.1", + "osenv": "0.1.5" + } + }, + "normalize-package-data": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.4.0.tgz", + "integrity": "sha1-EvlaMH1YNSB1oEkHuErIvpisAS8=", + "dev": true, + "requires": { + "hosted-git-info": "2.5.0", + "is-builtin-module": "1.0.0", + "semver": "5.5.0", + "validate-npm-package-license": "3.0.3" + } + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true + }, + "oauth-sign": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.8.2.tgz", + "integrity": "sha1-Rqarfwrq2N6unsBWV4C31O/rnUM=", + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1.0.2" + } + }, + "os-homedir": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=" + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=" + }, + "osenv": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz", + "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==", + "requires": { + "os-homedir": "1.0.2", + "os-tmpdir": "1.0.2" + } + }, + "package": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package/-/package-1.0.1.tgz", + "integrity": "sha1-0lofmeJQbcsn1nBLg9yooxLk7cw=", + "dev": true + }, + "pako": { + "version": "0.2.9", + "resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz", + "integrity": "sha1-8/dSL073gjSNqBYbrZ7P1Rv4OnU=", + "dev": true + }, + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "dev": true, + "requires": { + "error-ex": "1.3.1" + } + }, + "path-exists": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-2.1.0.tgz", + "integrity": "sha1-D+tsZPD8UY2adU3V77YscCJ2H0s=", + "dev": true, + "requires": { + "pinkie-promise": "2.0.1" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-type": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", + "integrity": "sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "pify": "2.3.0", + "pinkie-promise": "2.0.1" + } + }, + "pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + 
"integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", + "dev": true + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", + "dev": true + }, + "phantomjs-prebuilt": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/phantomjs-prebuilt/-/phantomjs-prebuilt-2.1.16.tgz", + "integrity": "sha1-79ISpKOWbTZHaE6ouniFSb4q7+8=", + "dev": true, + "requires": { + "es6-promise": "4.2.4", + "extract-zip": "1.6.6", + "fs-extra": "1.0.0", + "hasha": "2.2.0", + "kew": "0.7.0", + "progress": "1.1.8", + "request": "2.83.0", + "request-progress": "2.0.1", + "which": "1.3.0" + }, + "dependencies": { + "which": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.0.tgz", + "integrity": "sha1-/wS9/AEO5UfXgL7DjhrBwnd9JTo=", + "dev": true, + "requires": { + "isexe": "2.0.0" + } + } + } + }, + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", + "dev": true + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", + "dev": true + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "dev": true, + "requires": { + "pinkie": "2.0.4" + } + }, + "pretty-bytes": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-1.0.4.tgz", + "integrity": "sha1-CiLoIQYJrTVUL4yNXSFZr/B1HIQ=", + "dev": true, + "requires": { + "get-stdin": "4.0.1", + "meow": "3.7.0" + } + }, + "process-nextick-args": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", + "integrity": 
"sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==", + "dev": true + }, + "progress": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/progress/-/progress-1.1.8.tgz", + "integrity": "sha1-4mDHj2Fhzdmw5WzD4Khd4Xx6V74=", + "dev": true + }, + "pseudomap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", + "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", + "dev": true + }, + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + }, + "qs": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", + "integrity": "sha1-NJzfbu+J7EXBLX1es/wMhwNDptg=", + "dev": true + }, + "read-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", + "integrity": "sha1-9f+qXs0pyzHAR0vKfXVra7KePyg=", + "dev": true, + "requires": { + "load-json-file": "1.1.0", + "normalize-package-data": "2.4.0", + "path-type": "1.1.0" + } + }, + "read-pkg-up": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-1.0.1.tgz", + "integrity": "sha1-nWPBMnbAZZGNV/ACpX9AobZD+wI=", + "dev": true, + "requires": { + "find-up": "1.1.2", + "read-pkg": "1.1.0" + } + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "redent": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-1.0.0.tgz", + "integrity": "sha1-z5Fqsf1fHxbfsggi3W7H9zDCr94=", + "dev": true, + "requires": { + "indent-string": "2.1.0", + "strip-indent": "1.0.1" + } + }, + "repeat-string": { + "version": 
"1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "repeating": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz", + "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=", + "dev": true, + "requires": { + "is-finite": "1.0.2" + } + }, + "request": { + "version": "2.83.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.83.0.tgz", + "integrity": "sha1-ygtl2gLtYpNYh4COb1EDgQNOM1Y=", + "dev": true, + "requires": { + "aws-sign2": "0.7.0", + "aws4": "1.6.0", + "caseless": "0.12.0", + "combined-stream": "1.0.6", + "extend": "3.0.1", + "forever-agent": "0.6.1", + "form-data": "2.3.2", + "har-validator": "5.0.3", + "hawk": "6.0.2", + "http-signature": "1.2.0", + "is-typedarray": "1.0.0", + "isstream": "0.1.2", + "json-stringify-safe": "5.0.1", + "mime-types": "2.1.18", + "oauth-sign": "0.8.2", + "performance-now": "2.1.0", + "qs": "6.5.1", + "safe-buffer": "5.1.1", + "stringstream": "0.0.5", + "tough-cookie": "2.3.4", + "tunnel-agent": "0.6.0", + "uuid": "3.2.1" + } + }, + "request-progress": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-2.0.1.tgz", + "integrity": "sha1-XTa7V5YcZzqlt4jbyBQf3yO0Tgg=", + "dev": true, + "requires": { + "throttleit": "1.0.0" + } + }, + "requizzle": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.1.tgz", + "integrity": "sha1-aUPDUwxNmn5G8c3dUcFY/GcM294=", + "requires": { + "underscore": "1.6.0" + }, + "dependencies": { + "underscore": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.6.0.tgz", + "integrity": "sha1-izixDKze9jM3uLJOT/htRa6lKag=" + } + } + }, + "resolve": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", + "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", + "dev": true + }, + 
"right-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/right-align/-/right-align-0.1.3.tgz", + "integrity": "sha1-YTObci/mo1FWiSENJOFMlhSGE+8=", + "dev": true, + "requires": { + "align-text": "0.1.4" + } + }, + "rimraf": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.2.8.tgz", + "integrity": "sha1-5Dm+Kq7jJzIZUnMPmaiSnk/FBYI=", + "dev": true + }, + "safe-buffer": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", + "integrity": "sha1-iTMSr2myEj3vcfV4iQAWce6yyFM=", + "dev": true + }, + "semver": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", + "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==", + "dev": true + }, + "shelljs": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.3.0.tgz", + "integrity": "sha1-NZbmMHp4FUT1kfN9phg2DzHbV7E=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "sntp": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/sntp/-/sntp-2.1.0.tgz", + "integrity": "sha1-LGzsFP7cIiJznK+bXD2F0cxaLMg=", + "dev": true, + "requires": { + "hoek": "4.2.1" + } + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true + }, + "spdx-correct": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.0.0.tgz", + "integrity": "sha512-N19o9z5cEyc8yQQPukRCZ9EUmb4HUpnrmaL/fxS2pBo2jbfcFRVuFZ/oFC+vZz0MNNk0h80iMn5/S6qGZOL5+g==", + "dev": true, + "requires": { + "spdx-expression-parse": "3.0.0", + "spdx-license-ids": "3.0.0" + } + }, + "spdx-exceptions": { + "version": 
"2.1.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz", + "integrity": "sha512-4K1NsmrlCU1JJgUrtgEeTVyfx8VaYea9J9LvARxhbHtVtohPs/gFGG5yy49beySjlIMhhXZ4QqujIZEfS4l6Cg==", + "dev": true + }, + "spdx-expression-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", + "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "dev": true, + "requires": { + "spdx-exceptions": "2.1.0", + "spdx-license-ids": "3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz", + "integrity": "sha512-2+EPwgbnmOIl8HjGBXXMd9NAu02vLjOO1nWw4kmeRDFyHn+M/ETfHxQUK0oXg8ctgVnl9t3rosNVsZ1jG61nDA==", + "dev": true + }, + "sprintf-js": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.1.tgz", + "integrity": "sha1-Nr54Mgr+WAH2zqPueLblqrlA6gw=", + "dev": true + }, + "sshpk": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.13.1.tgz", + "integrity": "sha1-US322mKHFEMW3EwY/hzx2UBzm+M=", + "dev": true, + "requires": { + "asn1": "0.2.3", + "assert-plus": "1.0.0", + "bcrypt-pbkdf": "1.0.1", + "dashdash": "1.14.1", + "ecc-jsbn": "0.1.1", + "getpass": "0.1.7", + "jsbn": "0.1.1", + "tweetnacl": "0.14.5" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + }, + "stringstream": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.5.tgz", + "integrity": "sha1-TkhM1N5aC7vuGORjB3EKioFiGHg=", + "dev": true + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": 
"sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "requires": { + "ansi-regex": "2.1.1" + } + }, + "strip-bom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", + "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=", + "dev": true, + "requires": { + "is-utf8": "0.2.1" + } + }, + "strip-indent": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-1.0.1.tgz", + "integrity": "sha1-DHlipq3vp7vUrDZkYKY4VSrhoKI=", + "dev": true, + "requires": { + "get-stdin": "4.0.1" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true + }, + "sync-exec": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/sync-exec/-/sync-exec-0.6.2.tgz", + "integrity": "sha1-cX0izFPwzh3vVZQ2LzqJouu5EQU=", + "dev": true + }, + "taffydb": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/taffydb/-/taffydb-2.6.2.tgz", + "integrity": "sha1-fLy2S1oUG2ou/CxdLGe04VCyomg=" + }, + "temporary": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/temporary/-/temporary-0.0.8.tgz", + "integrity": "sha1-oYqYHSi6jKNgJ/s8MFOMPst0CsA=", + "dev": true, + "requires": { + "package": "1.0.1" + } + }, + "throttleit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz", + "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=", + "dev": true + }, + "tough-cookie": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.4.tgz", + "integrity": "sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA==", + "dev": true, + 
"requires": { + "punycode": "1.4.1" + } + }, + "trim-newlines": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-1.0.0.tgz", + "integrity": "sha1-WIeWa7WCpFA6QetST301ARgVphM=", + "dev": true + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "dev": true, + "requires": { + "safe-buffer": "5.1.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", + "dev": true, + "optional": true + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": true + }, + "uglify-js": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.6.4.tgz", + "integrity": "sha1-ZeovswWck5RpLxX+2HwrNsFrmt8=", + "dev": true, + "requires": { + "async": "0.2.10", + "source-map": "0.5.7", + "uglify-to-browserify": "1.0.2", + "yargs": "3.10.0" + }, + "dependencies": { + "async": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=", + "dev": true + } + } + }, + "uglify-to-browserify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz", + "integrity": "sha1-bgkk1r2mta/jSeOabWMoUKD4grc=", + "dev": true + }, + "underscore": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", + "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=" + }, + "underscore-contrib": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/underscore-contrib/-/underscore-contrib-0.3.0.tgz", + "integrity": "sha1-ZltmwkeD+PorGMn4y7Dix9SMJsc=", + "requires": { + 
"underscore": "1.6.0" + }, + "dependencies": { + "underscore": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.6.0.tgz", + "integrity": "sha1-izixDKze9jM3uLJOT/htRa6lKag=" + } + } + }, + "underscore.string": { + "version": "3.3.4", + "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-3.3.4.tgz", + "integrity": "sha1-LCo/n4PmR2L9xF5s6sZRQoZCE9s=", + "dev": true, + "requires": { + "sprintf-js": "1.1.1", + "util-deprecate": "1.0.2" + } + }, + "uri-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/uri-path/-/uri-path-1.0.0.tgz", + "integrity": "sha1-l0fwGDWJM8Md4PzP2C0TjmcmLjI=", + "dev": true + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "uuid": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.2.1.tgz", + "integrity": "sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA==", + "dev": true + }, + "validate-npm-package-license": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz", + "integrity": "sha512-63ZOUnL4SIXj4L0NixR3L1lcjO38crAbgrTpl28t8jjrfuiOBL5Iygm+60qPs/KsZGzPNg6Smnc/oY16QTjF0g==", + "dev": true, + "requires": { + "spdx-correct": "3.0.0", + "spdx-expression-parse": "3.0.0" + } + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "1.3.0" + } + }, + "which": { + "version": "1.2.14", + "resolved": "https://registry.npmjs.org/which/-/which-1.2.14.tgz", + "integrity": "sha1-mofEN48D6CfOyvGs31bHNsAcFOU=", + "dev": true, + "requires": { + "isexe": 
"2.0.0" + } + }, + "window-size": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.0.tgz", + "integrity": "sha1-VDjNLqk7IC76Ohn+iIeu58lPnJ0=", + "dev": true + }, + "wordwrap": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "integrity": "sha1-t5Zpu0LstAn4PVg8rVLKF+qhZD8=", + "dev": true + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "xmlcreate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-1.0.2.tgz", + "integrity": "sha1-+mv3YqYKQT+z3Y9LA8WyaSONMI8=" + }, + "yallist": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", + "dev": true + }, + "yargs": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.10.0.tgz", + "integrity": "sha1-9+572FfdfB0tOMDnTvvWgdFDH9E=", + "dev": true, + "requires": { + "camelcase": "1.2.1", + "cliui": "2.1.0", + "decamelize": "1.2.0", + "window-size": "0.1.0" + }, + "dependencies": { + "camelcase": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", + "integrity": "sha1-m7UwTS4LVmmLLHWLCKPqqdqlijk=", + "dev": true + } + } + }, + "yauzl": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.4.1.tgz", + "integrity": "sha1-lSj0QtqxsihOWLQ3m7GU4i4MQAU=", + "dev": true, + "requires": { + "fd-slicer": "1.0.1" + } + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/js/package.json b/vendor/git.apache.org/thrift.git/lib/js/package.json index 421de933e..f3ed3e1e8 100644 --- a/vendor/git.apache.org/thrift.git/lib/js/package.json +++ b/vendor/git.apache.org/thrift.git/lib/js/package.json @@ -1,16 +1,16 @@ { "name": "thrift", - "version": "0.11.0", + "version": 
"1.0.0", "devDependencies": { - "grunt": "^0.4.5", + "grunt": "^1.0.2", "grunt-cli": "^1.2.0", "grunt-contrib-concat": "^1.0.1", "grunt-contrib-jshint": "^1.0.0", "grunt-contrib-qunit": "^1.2.0", "grunt-contrib-uglify": "^1.0.1", - "grunt-external-daemon": "^1.1.0", - "grunt-jsdoc": "^2.2.0", - "grunt-shell": "^1.3.0" + "grunt-jsdoc": "^2.2.1", + "grunt-shell-spawn": "^0.3.10", + "jslint": "^0.12.0" }, "dependencies": { "jsdoc": "^3.5.5", diff --git a/vendor/git.apache.org/thrift.git/lib/js/src/thrift.js b/vendor/git.apache.org/thrift.git/lib/js/src/thrift.js index d308c46dc..2b385a3a7 100644 --- a/vendor/git.apache.org/thrift.git/lib/js/src/thrift.js +++ b/vendor/git.apache.org/thrift.git/lib/js/src/thrift.js @@ -46,7 +46,7 @@ var Thrift = { * @const {string} Version * @memberof Thrift */ - Version: '0.11.0', + Version: '1.0.0-dev', /** * Thrift IDL type string to Id mapping. @@ -572,18 +572,11 @@ Thrift.TWebSocketTransport.prototype = { var clientCallback = callback; return function(msg) { self.setRecvBuffer(msg); - clientCallback(); + if (clientCallback) { + clientCallback(); + } }; }())); - if(callback) { - this.callbacks.push((function() { - var clientCallback = callback; - return function(msg) { - self.setRecvBuffer(msg); - clientCallback(); - }; - }())); - } } else { //Queue the send to go out __onOpen this.send_pending.push({ @@ -599,8 +592,8 @@ Thrift.TWebSocketTransport.prototype = { //If the user made calls before the connection was fully //open, send them now this.send_pending.forEach(function(elem) { - this.socket.send(elem.buf); - this.callbacks.push((function() { + self.socket.send(elem.buf); + self.callbacks.push((function() { var clientCallback = elem.cb; return function(msg) { self.setRecvBuffer(msg); @@ -1265,7 +1258,12 @@ Thrift.Protocol.prototype = { /** Deserializes the end of a list. 
*/ readListEnd: function() { - this.readFieldEnd(); + var pos = this.rpos.pop() - 2; + var st = this.rstack; + st.pop(); + if (st instanceof Array && st.length > pos && st[pos].length > 0) { + st.push(st[pos].shift()); + } }, /** @@ -1440,6 +1438,9 @@ Thrift.Protocol.prototype = { } this.readListEnd(); return null; + + default: + throw new Thrift.TProtocolException(Thrift.TProtocolExceptionType.INVALID_DATA); } } }; diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/build.properties b/vendor/git.apache.org/thrift.git/lib/js/test/build.properties new file mode 100644 index 000000000..84636683c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/js/test/build.properties @@ -0,0 +1,5 @@ +# Maven Ant tasks Jar details +mvn.ant.task.version=2.1.3 +mvn.repo=http://repo1.maven.org/maven2 +mvn.ant.task.url=${mvn.repo}/org/apache/maven/maven-ant-tasks/${mvn.ant.task.version} +mvn.ant.task.jar=maven-ant-tasks-${mvn.ant.task.version}.jar diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/build.xml b/vendor/git.apache.org/thrift.git/lib/js/test/build.xml index a905fde5b..0ba382891 100755 --- a/vendor/git.apache.org/thrift.git/lib/js/test/build.xml +++ b/vendor/git.apache.org/thrift.git/lib/js/test/build.xml @@ -31,17 +31,21 @@ + + - + - - + + + + - + @@ -59,15 +63,16 @@ - + + You need libthrift*.jar and libthrift*test.jar located at - ${thrift.java.dir}/build + ${thrift.java.dir}/build/libs Did you compile Thrift Java library and its test suite by "ant compile-test"? 
@@ -84,6 +89,7 @@ + @@ -92,10 +98,14 @@ - + + + + + @@ -161,6 +171,9 @@ + + + @@ -207,7 +220,17 @@ - + + + + + + + + + + + diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/deep-constructor.test.js b/vendor/git.apache.org/thrift.git/lib/js/test/deep-constructor.test.js index 336fc15ac..f349e466f 100644 --- a/vendor/git.apache.org/thrift.git/lib/js/test/deep-constructor.test.js +++ b/vendor/git.apache.org/thrift.git/lib/js/test/deep-constructor.test.js @@ -61,7 +61,13 @@ function createThriftObj() { DB: new Simple({value: 'k'}) } ] - } + }, + + list_of_list_field: [ + ['one', 'two'], + ['three', 'four'], + ['five', 'six'] + ] } ); } @@ -108,7 +114,13 @@ function createJsObj() { DB: {value: 'k'} } ] - } + }, + + list_of_list_field: [ + ['one', 'two'], + ['three', 'four'], + ['five', 'six'] + ] }; } @@ -125,6 +137,12 @@ function assertValues(obj, assert) { assert.equal(obj.struct_nested_containers_field[0][0].C[1].value, 'i'); assert.equal(obj.struct_nested_containers_field2.D[0].DA.value, 'j'); assert.equal(obj.struct_nested_containers_field2.D[1].DB.value, 'k'); + assert.equal(obj.list_of_list_field[0][0], 'one'); + assert.equal(obj.list_of_list_field[0][1], 'two'); + assert.equal(obj.list_of_list_field[1][0], 'three'); + assert.equal(obj.list_of_list_field[1][1], 'four'); + assert.equal(obj.list_of_list_field[2][0], 'five'); + assert.equal(obj.list_of_list_field[2][1], 'six'); } var cases = { diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/jsTestDriver.conf b/vendor/git.apache.org/thrift.git/lib/js/test/jsTestDriver.conf index b9702cd3a..eb1588c82 100755 --- a/vendor/git.apache.org/thrift.git/lib/js/test/jsTestDriver.conf +++ b/vendor/git.apache.org/thrift.git/lib/js/test/jsTestDriver.conf @@ -7,6 +7,7 @@ load: # dependencies - build/js/lib/jquery.js - build/js/thrift.js + - gen-js/DoubleConstantsTest_constants.js - gen-js/ThriftTest_types.js - gen-js/ThriftTest.js # the test suite diff --git 
a/vendor/git.apache.org/thrift.git/lib/js/test/test-async.js b/vendor/git.apache.org/thrift.git/lib/js/test/test-async.js index b56f2a2c3..b4e985426 100644 --- a/vendor/git.apache.org/thrift.git/lib/js/test/test-async.js +++ b/vendor/git.apache.org/thrift.git/lib/js/test/test-async.js @@ -345,4 +345,12 @@ module('Insanity'); }); }); +module('Oneway'); + asyncTest('testOneway', function() { + expect(1); + client.testOneway(1, function(result) { + equal(result, undefined); + QUnit.start(); + }); + }); \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/test-deep-constructor.html b/vendor/git.apache.org/thrift.git/lib/js/test/test-deep-constructor.html index 5835dc84b..4c5fb02e4 100755 --- a/vendor/git.apache.org/thrift.git/lib/js/test/test-deep-constructor.html +++ b/vendor/git.apache.org/thrift.git/lib/js/test/test-deep-constructor.html @@ -25,11 +25,11 @@ - + - - + + diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.html b/vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.html new file mode 100644 index 000000000..240cb3946 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.html @@ -0,0 +1,55 @@ ++ + + + + + Rendering Double Constants in JS: Unit Test + + + + + + + + + + + + + + + + + + +

Rendering Double Constants in JS: Unit Test

+

+
+

+
+ + + diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.js b/vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.js new file mode 100644 index 000000000..5d9cd2a8d --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/js/test/test-double-rendering.js @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /* jshint -W100 */ + +/* + * JavaScript test suite for double constants inside + * DebugProtoTest.thrift. These tests will run against Normal (-gen js) + * Apache Thrift interfaces. + * + * Synchronous blocking calls should be identical in both + * Normal and jQuery interfaces. All synchronous tests belong + * here. + * + * Asynchronous success callbacks passed as the last parameter + * of an RPC call should be identical in both Normal and jQuery + * interfaces. Async success tests belong here. + * + * Asynchronous exception processing is different in Normal + * and jQuery interfaces. Such tests belong in the test-nojq.js + * or test-jq.js files respectively. jQuery specific XHR object + * tests also belong in test-jq.js. 
Do not create any jQuery + * dependencies in this file or in test-nojq.js + * + * To compile client code for this test use: + * $ thrift -gen js ThriftTest.thrift + * $ thrift -gen js DebugProtoTest.thrift + * + * See also: + * ++ test-nojq.js for "-gen js" only tests + */ + +// double assertion threshold +var EPSILON = 0.0000001; + +// Work around for old API used by QUnitAdapter of jsTestDriver +if (typeof QUnit.log == 'function') { + // When using real QUnit (fron PhantomJS) log failures to console + QUnit.log(function(details) { + if (!details.result) { + console.log('======== FAIL ========'); + console.log('TestName: ' + details.name); + if (details.message) console.log(details.message); + console.log('Expected: ' + details.expected); + console.log('Actual : ' + details.actual); + console.log('======================'); + } + }); +} + +QUnit.module('Double rendering'); + + QUnit.test('Double (rendering)', function(assert) { + console.log('Double rendering test -- starts'); + var EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1; + var EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100; + var EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807; + var EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807; + var EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359; + var EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1; + var EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1; + var EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308; + var EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43; + var EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308; + var EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43; + assert.ok( + Math.abs(EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT - DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT - + 
DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT - + DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT - + DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS - + DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE - + DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE - + DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE - + DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE - + DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE - + DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST) <= EPSILON); + assert.ok( + Math.abs( + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE - + DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST) <= EPSILON); + assert.equal(typeof DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, 'number'); + assert.equal(typeof 
DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, 'number'); + assert.equal(typeof DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, 'number'); + var EXPECTED_DOUBLE_LIST = + [1,-100,100,9223372036854775807,-9223372036854775807,3.14159265359,1000000.1,-1000000.1,1.7e+308,-1.7e+308, + 9223372036854775816.43,-9223372036854775816.43]; + assert.equal(DOUBLE_LIST_TEST.length, EXPECTED_DOUBLE_LIST.length); + for (var i = 0; i < EXPECTED_DOUBLE_LIST.length; ++i) { + assert.ok(Math.abs(EXPECTED_DOUBLE_LIST[i] - DOUBLE_LIST_TEST[i]) <= EPSILON); + } + console.log('Double rendering test -- ends'); + }); + diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/test-es6.html b/vendor/git.apache.org/thrift.git/lib/js/test/test-es6.html new file mode 100644 index 000000000..92d07386b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/js/test/test-es6.html @@ -0,0 +1,65 @@ + + + + + + Thrift Javascript Bindings: Unit Test + + + + + + + + + + + + + + + + + + + + +

Thrift Javascript Bindings: Unit Test (ThriftTest.thrift)

+

+
+

+
+ + + diff --git a/vendor/git.apache.org/thrift.git/lib/js/test/test-es6.js b/vendor/git.apache.org/thrift.git/lib/js/test/test-es6.js new file mode 100644 index 000000000..a3a31dc1c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/js/test/test-es6.js @@ -0,0 +1,354 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + /* jshint -W100 */ + +/* + * Fully Async JavaScript test suite for ThriftTest.thrift. + * These tests are designed to exercise the WebSocket transport + * (which is exclusively async). 
+ * + * To compile client code for this test use: + * $ thrift -gen js ThriftTest.thrift + */ + + + +// all Languages in UTF-8 +var stringTest = "Afrikaans, Alemannisch, Aragonés, العربية, مصرى, Asturianu, Aymar aru, Azərbaycan, Башҡорт, Boarisch, Žemaitėška, Беларуская, Беларуская (тарашкевіца), Български, Bamanankan, বাংলা, Brezhoneg, Bosanski, Català, Mìng-dĕ̤ng-ngṳ̄, Нохчийн, Cebuano, ᏣᎳᎩ, Česky, Словѣ́ньскъ / ⰔⰎⰑⰂⰡⰐⰠⰔⰍⰟ, Чӑвашла, Cymraeg, Dansk, Zazaki, ދިވެހިބަސް, Ελληνικά, Emiliàn e rumagnòl, English, Esperanto, Español, Eesti, Euskara, فارسی, Suomi, Võro, Føroyskt, Français, Arpetan, Furlan, Frysk, Gaeilge, 贛語, Gàidhlig, Galego, Avañe'ẽ, ગુજરાતી, Gaelg, עברית, हिन्दी, Fiji Hindi, Hrvatski, Kreyòl ayisyen, Magyar, Հայերեն, Interlingua, Bahasa Indonesia, Ilokano, Ido, Íslenska, Italiano, 日本語, Lojban, Basa Jawa, ქართული, Kongo, Kalaallisut, ಕನ್ನಡ, 한국어, Къарачай-Малкъар, Ripoarisch, Kurdî, Коми, Kernewek, Кыргызча, Latina, Ladino, Lëtzebuergesch, Limburgs, Lingála, ລາວ, Lietuvių, Latviešu, Basa Banyumasan, Malagasy, Македонски, മലയാളം, मराठी, Bahasa Melayu, مازِرونی, Nnapulitano, Nedersaksisch, नेपाल भाषा, Nederlands, ‪Norsk (nynorsk)‬, ‪Norsk (bokmål)‬, Nouormand, Diné bizaad, Occitan, Иронау, Papiamentu, Deitsch, Norfuk / Pitkern, Polski, پنجابی, پښتو, Português, Runa Simi, Rumantsch, Romani, Română, Русский, Саха тыла, Sardu, Sicilianu, Scots, Sámegiella, Simple English, Slovenčina, Slovenščina, Српски / Srpski, Seeltersk, Svenska, Kiswahili, தமிழ், తెలుగు, Тоҷикӣ, ไทย, Türkmençe, Tagalog, Türkçe, Татарча/Tatarça, Українська, اردو, Tiếng Việt, Volapük, Walon, Winaray, 吴语, isiXhosa, ייִדיש, Yorùbá, Zeêuws, 中文, Bân-lâm-gú, 粵語"; + +function checkRecursively(map1, map2) { + if (typeof map1 !== 'function' && typeof map2 !== 'function') { + if (!map1 || typeof map1 !== 'object') { + equal(map1, map2); + } else { + for (var key in map1) { + checkRecursively(map1[key], map2[key]); + } + } + } +} + +module('Base Types'); + + asyncTest('Void', function() { + 
expect(1); + client.testVoid().then(function(result) { + equal(result, undefined); + QUnit.start(); + }); + }); + + asyncTest('String', function() { + expect(3); + QUnit.stop(2); + client.testString('').then(function(result) { + equal(result, ''); + QUnit.start(); + }); + client.testString(stringTest).then(function(result) { + equal(result, stringTest); + QUnit.start(); + }); + var specialCharacters = 'quote: \" backslash:' + + ' forwardslash-escaped: \/ ' + + ' backspace: \b formfeed: \f newline: \n return: \r tab: ' + + ' now-all-of-them-together: "\\\/\b\n\r\t' + + ' now-a-bunch-of-junk: !@#$%&()(&%$#{}{}<><><'; + client.testString(specialCharacters).then(function(result) { + equal(result, specialCharacters); + QUnit.start(); + }); + }); + + asyncTest('Double', function() { + expect(4); + QUnit.stop(3); + client.testDouble(0).then(function(result) { + equal(result, 0); + QUnit.start(); + }); + client.testDouble(-1).then(function(result) { + equal(result, -1); + QUnit.start(); + }); + client.testDouble(3.14).then(function(result) { + equal(result, 3.14); + QUnit.start(); + }); + client.testDouble(Math.pow(2, 60)).then(function(result) { + equal(result, Math.pow(2, 60)); + QUnit.start(); + }); + }); + // TODO: add testBinary() + asyncTest('Byte', function() { + expect(2); + QUnit.stop(); + client.testByte(0).then(function(result) { + equal(result, 0); + QUnit.start(); + }); + client.testByte(0x01).then(function(result) { + equal(result, 0x01); + QUnit.start(); + }); + }); + asyncTest('I32', function() { + expect(3); + QUnit.stop(2); + client.testI32(0).then(function(result) { + equal(result, 0); + QUnit.start(); + }); + client.testI32(Math.pow(2, 30)).then(function(result) { + equal(result, Math.pow(2, 30)); + QUnit.start(); + }); + client.testI32(-Math.pow(2, 30)).then(function(result) { + equal(result, -Math.pow(2, 30)); + QUnit.start(); + }); + }); + asyncTest('I64', function() { + expect(3); + QUnit.stop(2); + client.testI64(0).then(function(result) { + 
equal(result, 0); + QUnit.start(); + }); + //This is usually 2^60 but JS cannot represent anything over 2^52 accurately + client.testI64(Math.pow(2, 52)).then(function(result) { + equal(result, Math.pow(2, 52)); + QUnit.start(); + }); + client.testI64(-Math.pow(2, 52)).then(function(result) { + equal(result, -Math.pow(2, 52)); + QUnit.start(); + }); + }); + + +module('Structured Types'); + + asyncTest('Struct', function() { + expect(5); + var structTestInput = new ThriftTest.Xtruct(); + structTestInput.string_thing = 'worked'; + structTestInput.byte_thing = 0x01; + structTestInput.i32_thing = Math.pow(2, 30); + //This is usually 2^60 but JS cannot represent anything over 2^52 accurately + structTestInput.i64_thing = Math.pow(2, 52); + + client.testStruct(structTestInput).then(function(result) { + equal(result.string_thing, structTestInput.string_thing); + equal(result.byte_thing, structTestInput.byte_thing); + equal(result.i32_thing, structTestInput.i32_thing); + equal(result.i64_thing, structTestInput.i64_thing); + equal(JSON.stringify(result), JSON.stringify(structTestInput)); + QUnit.start(); + }); + }); + + asyncTest('Nest', function() { + expect(7); + var xtrTestInput = new ThriftTest.Xtruct(); + xtrTestInput.string_thing = 'worked'; + xtrTestInput.byte_thing = 0x01; + xtrTestInput.i32_thing = Math.pow(2, 30); + //This is usually 2^60 but JS cannot represent anything over 2^52 accurately + xtrTestInput.i64_thing = Math.pow(2, 52); + + var nestTestInput = new ThriftTest.Xtruct2(); + nestTestInput.byte_thing = 0x02; + nestTestInput.struct_thing = xtrTestInput; + nestTestInput.i32_thing = Math.pow(2, 15); + + client.testNest(nestTestInput).then(function(result) { + equal(result.byte_thing, nestTestInput.byte_thing); + equal(result.struct_thing.string_thing, nestTestInput.struct_thing.string_thing); + equal(result.struct_thing.byte_thing, nestTestInput.struct_thing.byte_thing); + equal(result.struct_thing.i32_thing, nestTestInput.struct_thing.i32_thing); + 
equal(result.struct_thing.i64_thing, nestTestInput.struct_thing.i64_thing); + equal(result.i32_thing, nestTestInput.i32_thing); + equal(JSON.stringify(result), JSON.stringify(nestTestInput)); + QUnit.start(); + }); + }); + + asyncTest('Map', function() { + expect(3); + var mapTestInput = {7: 77, 8: 88, 9: 99}; + + client.testMap(mapTestInput).then(function(result) { + for (var key in result) { + equal(result[key], mapTestInput[key]); + } + QUnit.start(); + }); + }); + + asyncTest('StringMap', function() { + expect(6); + var mapTestInput = { + 'a': '123', 'a b': 'with spaces ', 'same': 'same', '0': 'numeric key', + 'longValue': stringTest, stringTest: 'long key' + }; + + client.testStringMap(mapTestInput).then(function(result) { + for (var key in result) { + equal(result[key], mapTestInput[key]); + } + QUnit.start(); + }); + }); + + asyncTest('Set', function() { + expect(1); + var setTestInput = [1, 2, 3]; + client.testSet(setTestInput).then(function(result) { + ok(result, setTestInput); + QUnit.start(); + }); + }); + + asyncTest('List', function() { + expect(1); + var listTestInput = [1, 2, 3]; + client.testList(listTestInput).then(function(result) { + ok(result, listTestInput); + QUnit.start(); + }); + }); + + asyncTest('Enum', function() { + expect(1); + client.testEnum(ThriftTest.Numberz.ONE).then(function(result) { + equal(result, ThriftTest.Numberz.ONE); + QUnit.start(); + }); + }); + + asyncTest('TypeDef', function() { + expect(1); + client.testTypedef(69).then(function(result) { + equal(result, 69); + QUnit.start(); + }); + }); + + +module('deeper!'); + + asyncTest('MapMap', function() { + expect(16); + var mapMapTestExpectedResult = { + '4': {'1': 1, '2': 2, '3': 3, '4': 4}, + '-4': {'-4': -4, '-3': -3, '-2': -2, '-1': -1} + }; + + client.testMapMap(1).then(function(result) { + for (var key in result) { + for (var key2 in result[key]) { + equal(result[key][key2], mapMapTestExpectedResult[key][key2]); + } + } + checkRecursively(result, 
mapMapTestExpectedResult); + QUnit.start(); + }); + }); + + +module('Exception'); + + asyncTest('Xception', function() { + expect(2); + client.testException('Xception').then(function(res) { + ok(false); + }).catch(function(e) { + equal(e.errorCode, 1001); + equal(e.message, 'Xception'); + QUnit.start(); + }); + }); + + asyncTest('no Exception', 0, function() { + expect(1); + client.testException('no Exception').then(function(e) { + ok(!e); + QUnit.start(); + }); + }); + +module('Insanity'); + + asyncTest('testInsanity', function() { + expect(24); + var insanity = { + '1': { + '2': { + 'userMap': { '5': 5, '8': 8 }, + 'xtructs': [{ + 'string_thing': 'Goodbye4', + 'byte_thing': 4, + 'i32_thing': 4, + 'i64_thing': 4 + }, + { + 'string_thing': 'Hello2', + 'byte_thing': 2, + 'i32_thing': 2, + 'i64_thing': 2 + } + ] + }, + '3': { + 'userMap': { '5': 5, '8': 8 }, + 'xtructs': [{ + 'string_thing': 'Goodbye4', + 'byte_thing': 4, + 'i32_thing': 4, + 'i64_thing': 4 + }, + { + 'string_thing': 'Hello2', + 'byte_thing': 2, + 'i32_thing': 2, + 'i64_thing': 2 + } + ] + } + }, + '2': { '6': { 'userMap': null, 'xtructs': null } } + }; + client.testInsanity(new ThriftTest.Insanity()).then(function(res) { + ok(res, JSON.stringify(res)); + ok(insanity, JSON.stringify(insanity)); + checkRecursively(res, insanity); + QUnit.start(); + }); + }); + +module('Oneway'); + asyncTest('testOneway', function() { + expect(1); + client.testOneway(1).then(function(result) { + equal(result, undefined); + QUnit.start(); + }); + }); \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/lua/THttpTransport.lua b/vendor/git.apache.org/thrift.git/lib/lua/THttpTransport.lua index 041e42188..5bbfece25 100644 --- a/vendor/git.apache.org/thrift.git/lib/lua/THttpTransport.lua +++ b/vendor/git.apache.org/thrift.git/lib/lua/THttpTransport.lua @@ -25,7 +25,7 @@ THttpTransport = TTransportBase:new{ wBuf = '', rBuf = '', CRLF = '\r\n', - VERSION = '0.11.0', + VERSION = '1.0.0', isServer = true 
} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Makefile.am b/vendor/git.apache.org/thrift.git/lib/netcore/Makefile.am index facee11dd..caf3f3455 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Makefile.am @@ -19,87 +19,23 @@ SUBDIRS = . -THRIFT = $(top_builddir)/compiler/cpp/thrift - -TESTDIR = Tests/Thrift.PublicInterfaces.Compile.Tests -GENDIR = $(TESTDIR)/gen-netcore - -THRIFTCODE = \ - Thrift/Thrift.csproj \ - Thrift/ITAsyncProcessor.cs \ - Thrift/ITProcessorFactory.cs \ - Thrift/SingletonTProcessorFactory.cs \ - Thrift/TApplicationException.cs \ - Thrift/TBaseClient.cs \ - Thrift/TException.cs \ - Thrift/TMultiplexedProcessor.cs \ - Thrift/Collections/TCollections.cs \ - Thrift/Collections/THashSet.cs \ - Thrift/Properties/AssemblyInfo.cs \ - Thrift/Protocols/ITProtocolFactory.cs \ - Thrift/Protocols/TAbstractBase.cs \ - Thrift/Protocols/TBase.cs \ - Thrift/Protocols/TBinaryProtocol.cs \ - Thrift/Protocols/TCompactProtocol.cs \ - Thrift/Protocols/TJSONProtocol.cs \ - Thrift/Protocols/TMultiplexedProtocol.cs \ - Thrift/Protocols/TProtocol.cs \ - Thrift/Protocols/TProtocolDecorator.cs \ - Thrift/Protocols/TProtocolException.cs \ - Thrift/Protocols/Entities/TField.cs \ - Thrift/Protocols/Entities/TList.cs \ - Thrift/Protocols/Entities/TMap.cs \ - Thrift/Protocols/Entities/TMessage.cs \ - Thrift/Protocols/Entities/TMessageType.cs \ - Thrift/Protocols/Entities/TSet.cs \ - Thrift/Protocols/Entities/TStruct.cs \ - Thrift/Protocols/Entities/TType.cs \ - Thrift/Protocols/Utilities/TBase64Utils.cs \ - Thrift/Protocols/Utilities/TProtocolUtil.cs \ - Thrift/Server/AsyncBaseServer.cs \ - Thrift/Server/TBaseServer.cs \ - Thrift/Server/TServerEventHandler.cs \ - Thrift/Transports/TClientTransport.cs \ - Thrift/Transports/TServerTransport.cs \ - Thrift/Transports/TTransportException.cs \ - Thrift/Transports/TTransportFactory.cs \ - Thrift/Transports/Client/TBufferedClientTransport.cs \ - 
Thrift/Transports/Client/TFramedClientTransport.cs \ - Thrift/Transports/Client/THttpClientTransport.cs \ - Thrift/Transports/Client/TMemoryBufferClientTransport.cs \ - Thrift/Transports/Client/TNamedPipeClientTransport.cs \ - Thrift/Transports/Client/TSocketClientTransport.cs \ - Thrift/Transports/Client/TStreamClientTransport.cs \ - Thrift/Transports/Client/TTlsSocketClientTransport.cs \ - Thrift/Transports/Server/THttpServerTransport.cs \ - Thrift/Transports/Server/TNamedPipeServerTransport.cs \ - Thrift/Transports/Server/TServerFramedTransport.cs \ - Thrift/Transports/Server/TServerSocketTransport.cs \ - Thrift/Transports/Server/TTlsServerSocketTransport.cs - -all-local: \ - Thrift.dll - -Thrift.dll: $(THRIFTCODE) - $(MKDIR_P) $(GENDIR) - $(THRIFT) -gen netcore:wcf -r -out $(GENDIR) $(TESTDIR)/CassandraTest.thrift - $(THRIFT) -gen netcore:wcf -r -out $(GENDIR) $(top_srcdir)/test/ThriftTest.thrift - $(THRIFT) -gen netcore:wcf -r -out $(GENDIR) $(top_srcdir)/contrib/fb303/if/fb303.thrift - $(DOTNETCORE) --info - $(DOTNETCORE) restore +all-local: $(DOTNETCORE) build +check-local: + $(DOTNETCORE) test Tests/Thrift.Tests/Thrift.Tests.csproj + ${DOTNETCORE} test Tests/Thrift.IntegrationTests/Thrift.IntegrationTests.csproj + clean-local: - $(RM) Thrift.dll - $(RM) -r $(GENDIR) $(RM) -r Thrift/bin $(RM) -r Thrift/obj - $(RM) -r Tests/Thrift.PublicInterfaces.Compile.Tests/bin - $(RM) -r Tests/Thrift.PublicInterfaces.Compile.Tests/obj EXTRA_DIST = \ - $(THRIFTCODE) \ - Thrift.sln \ - Tests \ - README.md - + README.md \ + Tests \ + Thrift \ + Thrift.sln \ + build.cmd \ + build.sh \ + runtests.cmd \ + runtests.sh diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/README.md b/vendor/git.apache.org/thrift.git/lib/netcore/README.md index 39492f3c0..94b047f5c 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/README.md +++ b/vendor/git.apache.org/thrift.git/lib/netcore/README.md @@ -10,7 +10,10 @@ Thrift client library ported to Microsoft .Net Core - .NET 
Standard 1.6 (SDK 2.0.0) # How to build on Windows +- Get Thrift IDL compiler executable, add to some folder and add path to this folder into PATH variable - Open the Thrift.sln project with Visual Studio and build. +or +- Build with scripts # How to build on Unix - Ensure you have .NET Core 2.0.0 SDK installed or use the Ubuntu Xenial docker image diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/.gitignore b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/.gitignore new file mode 100644 index 000000000..7254c313a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/.gitignore @@ -0,0 +1,2 @@ +# ignore for autogenerated files +/Apache diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Protocols/ProtocolsOperationsTests.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Protocols/ProtocolsOperationsTests.cs new file mode 100644 index 000000000..bc4afa156 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Protocols/ProtocolsOperationsTests.cs @@ -0,0 +1,502 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +using System; +using System.IO; +using System.Text; +using System.Threading.Tasks; +using KellermanSoftware.CompareNetObjects; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Thrift.Protocols; +using Thrift.Protocols.Entities; +using Thrift.Transports.Client; + +namespace Thrift.IntegrationTests.Protocols +{ + [TestClass] + public class ProtocolsOperationsTests + { + private readonly CompareLogic _compareLogic = new CompareLogic(); + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol), TMessageType.Call)] + [DataRow(typeof(TBinaryProtocol), TMessageType.Exception)] + [DataRow(typeof(TBinaryProtocol), TMessageType.Oneway)] + [DataRow(typeof(TBinaryProtocol), TMessageType.Reply)] + [DataRow(typeof(TCompactProtocol), TMessageType.Call)] + [DataRow(typeof(TCompactProtocol), TMessageType.Exception)] + [DataRow(typeof(TCompactProtocol), TMessageType.Oneway)] + [DataRow(typeof(TCompactProtocol), TMessageType.Reply)] + [DataRow(typeof(TJsonProtocol), TMessageType.Call)] + [DataRow(typeof(TJsonProtocol), TMessageType.Exception)] + [DataRow(typeof(TJsonProtocol), TMessageType.Oneway)] + [DataRow(typeof(TJsonProtocol), TMessageType.Reply)] + public async Task WriteReadMessage_Test(Type protocolType, TMessageType messageType) + { + var expected = new TMessage(nameof(TMessage), messageType, 1); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteMessageBeginAsync(expected); + await protocol.WriteMessageEndAsync(); + + stream.Seek(0, SeekOrigin.Begin); + + var actualMessage = await protocol.ReadMessageBeginAsync(); + await protocol.ReadMessageEndAsync(); + + var result = _compareLogic.Compare(expected, actualMessage); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + 
[DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + [ExpectedException(typeof(Exception))] + public async Task WriteReadStruct_Test(Type protocolType) + { + var expected = new TStruct(nameof(TStruct)); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteStructBeginAsync(expected); + await protocol.WriteStructEndAsync(); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadStructBeginAsync(); + await protocol.ReadStructEndAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + [ExpectedException(typeof(Exception))] + public async Task WriteReadField_Test(Type protocolType) + { + var expected = new TField(nameof(TField), TType.String, 1); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteFieldBeginAsync(expected); + await protocol.WriteFieldEndAsync(); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadFieldBeginAsync(); + await protocol.ReadFieldEndAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadMap_Test(Type protocolType) + { + var expected = new 
TMap(TType.String, TType.String, 1); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteMapBeginAsync(expected); + await protocol.WriteMapEndAsync(); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadMapBeginAsync(); + await protocol.ReadMapEndAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadList_Test(Type protocolType) + { + var expected = new TList(TType.String, 1); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteListBeginAsync(expected); + await protocol.WriteListEndAsync(); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadListBeginAsync(); + await protocol.ReadListEndAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadSet_Test(Type protocolType) + { + var expected = new TSet(TType.String, 1); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteSetBeginAsync(expected); + await protocol.WriteSetEndAsync(); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await 
protocol.ReadSetBeginAsync(); + await protocol.ReadSetEndAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadBool_Test(Type protocolType) + { + var expected = true; + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteBoolAsync(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadBoolAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadByte_Test(Type protocolType) + { + var expected = sbyte.MaxValue; + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteByteAsync(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadByteAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadI16_Test(Type protocolType) + { + var expected = 
short.MaxValue; + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteI16Async(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadI16Async(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadI32_Test(Type protocolType) + { + var expected = int.MaxValue; + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteI32Async(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadI32Async(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadI64_Test(Type protocolType) + { + var expected = long.MaxValue; + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteI64Async(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadI64Async(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + 
} + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadDouble_Test(Type protocolType) + { + var expected = double.MaxValue; + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteDoubleAsync(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadDoubleAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadString_Test(Type protocolType) + { + var expected = nameof(String); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteStringAsync(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await protocol.ReadStringAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + [DataTestMethod] + [DataRow(typeof(TBinaryProtocol))] + [DataRow(typeof(TCompactProtocol))] + [DataRow(typeof(TJsonProtocol))] + public async Task WriteReadBinary_Test(Type protocolType) + { + var expected = Encoding.UTF8.GetBytes(nameof(String)); + + try + { + var tuple = GetProtocolInstance(protocolType); + using (var stream = tuple.Item1) + { + var protocol = tuple.Item2; + + await protocol.WriteBinaryAsync(expected); + + stream?.Seek(0, SeekOrigin.Begin); + + var actual = await 
protocol.ReadBinaryAsync(); + + var result = _compareLogic.Compare(expected, actual); + Assert.IsTrue(result.AreEqual, result.DifferencesString); + } + } + catch (Exception e) + { + throw new Exception($"Exception during testing of protocol: {protocolType.FullName}", e); + } + } + + private static Tuple GetProtocolInstance(Type protocolType) + { + var memoryStream = new MemoryStream(); + var streamClientTransport = new TStreamClientTransport(memoryStream, memoryStream); + var protocol = (TProtocol) Activator.CreateInstance(protocolType, streamClientTransport); + return new Tuple(memoryStream, protocol); + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Thrift.IntegrationTests.csproj b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Thrift.IntegrationTests.csproj new file mode 100644 index 000000000..f25dac536 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.IntegrationTests/Thrift.IntegrationTests.csproj @@ -0,0 +1,29 @@ + + + netcoreapp2.0 + Thrift.IntegrationTests + Thrift.IntegrationTests + Exe + false + false + false + false + false + false + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/.gitignore b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/.gitignore new file mode 100644 index 000000000..ae929a32e --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/.gitignore @@ -0,0 +1,4 @@ +# ignore for autogenerated files +/ThriftTest +/Apache +/Facebook diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/Thrift.PublicInterfaces.Compile.Tests.csproj b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/Thrift.PublicInterfaces.Compile.Tests.csproj index 
f55111613..c4a84a301 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/Thrift.PublicInterfaces.Compile.Tests.csproj +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.PublicInterfaces.Compile.Tests/Thrift.PublicInterfaces.Compile.Tests.csproj @@ -18,4 +18,19 @@ + + + + + + + + + + + + + + + diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/TCollectionsTests.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/TCollectionsTests.cs new file mode 100644 index 000000000..1be99b48f --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/TCollectionsTests.cs @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +using System; +using System.Collections.Generic; +using System.Text; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Thrift.Collections; + +namespace Thrift.Tests.Collections +{ + // ReSharper disable once InconsistentNaming + [TestClass] + public class TCollectionsTests + { + //TODO: Add tests for IEnumerable with objects and primitive values inside + + [TestMethod] + public void TCollection_Equals_Primitive_Test() + { + var collection1 = new List {1,2,3}; + var collection2 = new List {1,2,3}; + + var result = TCollections.Equals(collection1, collection2); + + Assert.IsTrue(result); + } + + [TestMethod] + public void TCollection_Equals_Primitive_Different_Test() + { + var collection1 = new List { 1, 2, 3 }; + var collection2 = new List { 1, 2 }; + + var result = TCollections.Equals(collection1, collection2); + + Assert.IsFalse(result); + } + + [TestMethod] + public void TCollection_Equals_Objects_Test() + { + var collection1 = new List { new ExampleClass { X = 1 }, new ExampleClass { X = 2 } }; + var collection2 = new List { new ExampleClass { X = 1 }, new ExampleClass { X = 2 } }; + + var result = TCollections.Equals(collection1, collection2); + + // references to different collections + Assert.IsFalse(result); + } + + [TestMethod] + public void TCollection_Equals_OneAndTheSameObject_Test() + { + var collection1 = new List { new ExampleClass { X = 1 }, new ExampleClass { X = 2 } }; + var collection2 = collection1; + + var result = TCollections.Equals(collection1, collection2); + + // references to one and the same collection + Assert.IsTrue(result); + } + + private class ExampleClass + { + public int X { get; set; } + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/THashSetTests.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/THashSetTests.cs new file mode 100644 index 000000000..8de573eee --- /dev/null +++ 
b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Collections/THashSetTests.cs @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Thrift.Collections; + +namespace Thrift.Tests.Collections +{ + // ReSharper disable once InconsistentNaming + [TestClass] + public class THashSetTests + { + [TestMethod] + public void THashSet_Equals_Primitive_Test() + { + const int value = 1; + + var hashSet = new THashSet {value}; + + Assert.IsTrue(hashSet.Contains(value)); + + hashSet.Remove(value); + + Assert.IsTrue(hashSet.Count == 0); + + hashSet.Add(value); + + Assert.IsTrue(hashSet.Contains(value)); + + hashSet.Clear(); + + Assert.IsTrue(hashSet.Count == 0); + + var newArr = new int[1]; + hashSet.Add(value); + hashSet.CopyTo(newArr, 0); + + Assert.IsTrue(newArr.Contains(value)); + + var en = hashSet.GetEnumerator(); + en.MoveNext(); + + Assert.IsTrue((int)en.Current == value); + + using (var ien = ((IEnumerable)hashSet).GetEnumerator()) + { + ien.MoveNext(); + + Assert.IsTrue(ien.Current == value); + } + } + } +} diff --git 
a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolHelperTests.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolHelperTests.cs new file mode 100644 index 000000000..cdc8317e2 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolHelperTests.cs @@ -0,0 +1,172 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +using System; +using System.Collections.Generic; +using System.Linq; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using Thrift.Protocols; +using Thrift.Protocols.Entities; +using Thrift.Protocols.Utilities; + +namespace Thrift.Tests.Protocols +{ + [TestClass] + public class TJSONProtocolHelperTests + { + [TestMethod] + public void GetTypeNameForTypeId_Test() + { + // input/output + var sets = new List> + { + new Tuple(TType.Bool, TJSONProtocolConstants.TypeNames.NameBool), + new Tuple(TType.Byte, TJSONProtocolConstants.TypeNames.NameByte), + new Tuple(TType.I16, TJSONProtocolConstants.TypeNames.NameI16), + new Tuple(TType.I32, TJSONProtocolConstants.TypeNames.NameI32), + new Tuple(TType.I64, TJSONProtocolConstants.TypeNames.NameI64), + new Tuple(TType.Double, TJSONProtocolConstants.TypeNames.NameDouble), + new Tuple(TType.String, TJSONProtocolConstants.TypeNames.NameString), + new Tuple(TType.Struct, TJSONProtocolConstants.TypeNames.NameStruct), + new Tuple(TType.Map, TJSONProtocolConstants.TypeNames.NameMap), + new Tuple(TType.Set, TJSONProtocolConstants.TypeNames.NameSet), + new Tuple(TType.List, TJSONProtocolConstants.TypeNames.NameList), + }; + + foreach (var t in sets) + { + Assert.IsTrue(TJSONProtocolHelper.GetTypeNameForTypeId(t.Item1) == t.Item2, $"Wrong mapping of TypeName {t.Item2} to TType: {t.Item1}"); + } + } + + [TestMethod] + [ExpectedException(typeof(TProtocolException))] + public void GetTypeNameForTypeId_TStop_Test() + { + TJSONProtocolHelper.GetTypeNameForTypeId(TType.Stop); + } + + [TestMethod] + [ExpectedException(typeof(TProtocolException))] + public void GetTypeNameForTypeId_NonExistingTType_Test() + { + TJSONProtocolHelper.GetTypeNameForTypeId((TType)100); + } + + [TestMethod] + public void GetTypeIdForTypeName_Test() + { + // input/output + var sets = new List> + { + new Tuple(TType.Bool, TJSONProtocolConstants.TypeNames.NameBool), + new Tuple(TType.Byte, TJSONProtocolConstants.TypeNames.NameByte), + new Tuple(TType.I16, 
TJSONProtocolConstants.TypeNames.NameI16), + new Tuple(TType.I32, TJSONProtocolConstants.TypeNames.NameI32), + new Tuple(TType.I64, TJSONProtocolConstants.TypeNames.NameI64), + new Tuple(TType.Double, TJSONProtocolConstants.TypeNames.NameDouble), + new Tuple(TType.String, TJSONProtocolConstants.TypeNames.NameString), + new Tuple(TType.Struct, TJSONProtocolConstants.TypeNames.NameStruct), + new Tuple(TType.Map, TJSONProtocolConstants.TypeNames.NameMap), + new Tuple(TType.Set, TJSONProtocolConstants.TypeNames.NameSet), + new Tuple(TType.List, TJSONProtocolConstants.TypeNames.NameList), + }; + + foreach (var t in sets) + { + Assert.IsTrue(TJSONProtocolHelper.GetTypeIdForTypeName(t.Item2) == t.Item1, $"Wrong mapping of TypeName {t.Item2} to TType: {t.Item1}"); + } + } + + [TestMethod] + [ExpectedException(typeof(TProtocolException))] + public void GetTypeIdForTypeName_TStopTypeName_Test() + { + TJSONProtocolHelper.GetTypeIdForTypeName(new []{(byte)TType.Stop, (byte)TType.Stop}); + } + + [TestMethod] + [ExpectedException(typeof(TProtocolException))] + public void GetTypeIdForTypeName_NonExistingTypeName_Test() + { + TJSONProtocolHelper.GetTypeIdForTypeName(new byte[]{100}); + } + + [TestMethod] + [ExpectedException(typeof(TProtocolException))] + public void GetTypeIdForTypeName_EmptyName_Test() + { + TJSONProtocolHelper.GetTypeIdForTypeName(new byte[] {}); + } + + [TestMethod] + public void IsJsonNumeric_Test() + { + // input/output + var correctJsonNumeric = "+-.0123456789Ee"; + var incorrectJsonNumeric = "AaBcDd/*\\"; + + var sets = correctJsonNumeric.Select(ch => new Tuple((byte) ch, true)).ToList(); + sets.AddRange(incorrectJsonNumeric.Select(ch => new Tuple((byte) ch, false))); + + foreach (var t in sets) + { + Assert.IsTrue(TJSONProtocolHelper.IsJsonNumeric(t.Item1) == t.Item2, $"Wrong mapping of Char {t.Item1} to bool: {t.Item2}"); + } + } + + [TestMethod] + public void ToHexVal_Test() + { + // input/output + var chars = "0123456789abcdef"; + var 
expectedHexValues = new byte[] {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; + + var sets = chars.Select((ch, i) => new Tuple(ch, expectedHexValues[i])).ToList(); + + foreach (var t in sets) + { + var actualResult = TJSONProtocolHelper.ToHexVal((byte)t.Item1); + Assert.IsTrue(actualResult == t.Item2, $"Wrong mapping of char byte {t.Item1} to it expected hex value: {t.Item2}. Actual hex value: {actualResult}"); + } + } + + [TestMethod] + [ExpectedException(typeof(TProtocolException))] + public void ToHexVal_WrongInputChar_Test() + { + TJSONProtocolHelper.ToHexVal((byte)'s'); + } + + [TestMethod] + public void ToHexChar_Test() + { + // input/output + var hexValues = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; + var expectedChars = "0123456789abcdef"; + + + var sets = hexValues.Select((hv, i) => new Tuple(hv, expectedChars[i])).ToList(); + + foreach (var t in sets) + { + var actualResult = TJSONProtocolHelper.ToHexChar(t.Item1); + Assert.IsTrue(actualResult == t.Item2, $"Wrong mapping of hex value {t.Item1} to it expected char: {t.Item2}. Actual hex value: {actualResult}"); + } + } + } +} \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolTests.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolTests.cs new file mode 100644 index 000000000..523736086 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Protocols/TJsonProtocolTests.cs @@ -0,0 +1,67 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +using System; +using System.Collections.Generic; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using NSubstitute; +using Thrift.Protocols; +using Thrift.Protocols.Entities; +using Thrift.Transports; +using Thrift.Transports.Client; + +namespace Thrift.Tests.Protocols +{ + // ReSharper disable once InconsistentNaming + [TestClass] + public class TJSONProtocolTests + { + [TestMethod] + public void TJSONProtocol_Can_Create_Instance_Test() + { + var httpClientTransport = Substitute.For(new Uri("http://localhost"), null); + + var result = new TJSONProtocolWrapper(httpClientTransport); + + Assert.IsNotNull(result); + Assert.IsNotNull(result.WrappedContext); + Assert.IsNotNull(result.WrappedReader); + Assert.IsNotNull(result.Transport); + Assert.IsTrue(result.WrappedRecursionDepth == 0); + Assert.IsTrue(result.WrappedRecursionLimit == TProtocol.DefaultRecursionDepth); + + Assert.IsTrue(result.Transport.Equals(httpClientTransport)); + Assert.IsTrue(result.WrappedContext.GetType().Name.Equals("JSONBaseContext", StringComparison.OrdinalIgnoreCase)); + Assert.IsTrue(result.WrappedReader.GetType().Name.Equals("LookaheadReader", StringComparison.OrdinalIgnoreCase)); + } + + private class TJSONProtocolWrapper : TJsonProtocol + { + public TJSONProtocolWrapper(TClientTransport trans) : base(trans) + { + } + + public object WrappedContext => Context; + public object WrappedReader => Reader; + public int WrappedRecursionDepth => RecursionDepth; + public int WrappedRecursionLimit 
=> RecursionLimit; + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Thrift.Tests.csproj b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Thrift.Tests.csproj new file mode 100644 index 000000000..e46f16522 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Tests/Thrift.Tests/Thrift.Tests.csproj @@ -0,0 +1,18 @@ + + + netcoreapp2.0 + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift.sln b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift.sln index a730269d6..fe30aa5c6 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift.sln +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift.sln @@ -4,10 +4,14 @@ VisualStudioVersion = 15.0.26730.12 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tests", "Tests", "{F51FC4DA-CAC0-48B1-A069-B1712BCAA5BE}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift.PublicInterfaces.Compile.Tests", "Tests\Thrift.PublicInterfaces.Compile.Tests\Thrift.PublicInterfaces.Compile.Tests.csproj", "{0676962B-98C2-49EC-B4C4-7A0451D0640B}" -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift", "Thrift\Thrift.csproj", "{D85F572F-7D80-40A4-9A9B-2731ED187C24}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift.IntegrationTests", "Tests\Thrift.IntegrationTests\Thrift.IntegrationTests.csproj", "{9F9A11BF-3C95-4E80-AFBF-768541996844}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift.Tests", "Tests\Thrift.Tests\Thrift.Tests.csproj", "{75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift.PublicInterfaces.Compile.Tests", "Tests\Thrift.PublicInterfaces.Compile.Tests\Thrift.PublicInterfaces.Compile.Tests.csproj", "{A429F05B-F511-45EF-AE7B-04E1AE9C9367}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = 
preSolution Debug|Any CPU = Debug|Any CPU @@ -18,18 +22,6 @@ Global Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Debug|x64.ActiveCfg = Debug|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Debug|x64.Build.0 = Debug|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Debug|x86.ActiveCfg = Debug|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Debug|x86.Build.0 = Debug|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Release|Any CPU.Build.0 = Release|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Release|x64.ActiveCfg = Release|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Release|x64.Build.0 = Release|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Release|x86.ActiveCfg = Release|Any CPU - {0676962B-98C2-49EC-B4C4-7A0451D0640B}.Release|x86.Build.0 = Release|Any CPU {D85F572F-7D80-40A4-9A9B-2731ED187C24}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {D85F572F-7D80-40A4-9A9B-2731ED187C24}.Debug|Any CPU.Build.0 = Debug|Any CPU {D85F572F-7D80-40A4-9A9B-2731ED187C24}.Debug|x64.ActiveCfg = Debug|Any CPU @@ -42,12 +34,50 @@ Global {D85F572F-7D80-40A4-9A9B-2731ED187C24}.Release|x64.Build.0 = Release|Any CPU {D85F572F-7D80-40A4-9A9B-2731ED187C24}.Release|x86.ActiveCfg = Release|Any CPU {D85F572F-7D80-40A4-9A9B-2731ED187C24}.Release|x86.Build.0 = Release|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Debug|Any CPU.Build.0 = Debug|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Debug|x64.ActiveCfg = Debug|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Debug|x64.Build.0 = Debug|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Debug|x86.ActiveCfg = Debug|Any 
CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Debug|x86.Build.0 = Debug|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Release|Any CPU.ActiveCfg = Release|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Release|Any CPU.Build.0 = Release|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Release|x64.ActiveCfg = Release|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Release|x64.Build.0 = Release|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Release|x86.ActiveCfg = Release|Any CPU + {9F9A11BF-3C95-4E80-AFBF-768541996844}.Release|x86.Build.0 = Release|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Debug|Any CPU.Build.0 = Debug|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Debug|x64.ActiveCfg = Debug|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Debug|x64.Build.0 = Debug|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Debug|x86.ActiveCfg = Debug|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Debug|x86.Build.0 = Debug|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Release|Any CPU.ActiveCfg = Release|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Release|Any CPU.Build.0 = Release|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Release|x64.ActiveCfg = Release|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Release|x64.Build.0 = Release|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Release|x86.ActiveCfg = Release|Any CPU + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1}.Release|x86.Build.0 = Release|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Debug|x64.ActiveCfg = Debug|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Debug|x64.Build.0 = Debug|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Debug|x86.ActiveCfg = Debug|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Debug|x86.Build.0 = Debug|Any CPU + 
{A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Release|Any CPU.Build.0 = Release|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Release|x64.ActiveCfg = Release|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Release|x64.Build.0 = Release|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Release|x86.ActiveCfg = Release|Any CPU + {A429F05B-F511-45EF-AE7B-04E1AE9C9367}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution - {0676962B-98C2-49EC-B4C4-7A0451D0640B} = {F51FC4DA-CAC0-48B1-A069-B1712BCAA5BE} + {9F9A11BF-3C95-4E80-AFBF-768541996844} = {F51FC4DA-CAC0-48B1-A069-B1712BCAA5BE} + {75C2F9DC-3546-4D0A-A2DF-31C93516B6C1} = {F51FC4DA-CAC0-48B1-A069-B1712BCAA5BE} + {A429F05B-F511-45EF-AE7B-04E1AE9C9367} = {F51FC4DA-CAC0-48B1-A069-B1712BCAA5BE} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {FD20BC4A-0109-41D8-8C0C-893E784D7EF9} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Properties/AssemblyInfo.cs index 61cd3e30b..e3118ab23 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Properties/AssemblyInfo.cs @@ -52,5 +52,5 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: -[assembly: AssemblyVersion("0.11.0.1")] -[assembly: AssemblyFileVersion("0.11.0.1")] \ No newline at end of file +[assembly: AssemblyVersion("1.0.0.1")] +[assembly: AssemblyFileVersion("1.0.0.1")] \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TAbstractBase.cs 
b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TAbstractBase.cs index eddb85e75..4e18681bf 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TAbstractBase.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TAbstractBase.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBase.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBase.cs index cd1109971..014e1aee8 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBase.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBase.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBinaryProtocol.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBinaryProtocol.cs index fa0c5fc93..deec85c42 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBinaryProtocol.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TBinaryProtocol.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -79,7 +79,7 @@ namespace 
Thrift.Protocols } } - public override async Task WriteStructBeginAsync(TStruct struc, CancellationToken cancellationToken) + public override async Task WriteStructBeginAsync(TStruct @struct, CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { @@ -293,15 +293,15 @@ namespace Thrift.Protocols await WriteI64Async(BitConverter.DoubleToInt64Bits(d), cancellationToken); } - public override async Task WriteBinaryAsync(byte[] b, CancellationToken cancellationToken) + public override async Task WriteBinaryAsync(byte[] bytes, CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { return; } - await WriteI32Async(b.Length, cancellationToken); - await Trans.WriteAsync(b, 0, b.Length, cancellationToken); + await WriteI32Async(bytes.Length, cancellationToken); + await Trans.WriteAsync(bytes, 0, bytes.Length, cancellationToken); } public override async Task ReadMessageBeginAsync(CancellationToken cancellationToken) @@ -511,8 +511,13 @@ namespace Thrift.Protocols var i32In = new byte[4]; await Trans.ReadAllAsync(i32In, 0, 4, cancellationToken); - var result = ((i32In[0] & 0xff) << 24) | ((i32In[1] & 0xff) << 16) | ((i32In[2] & 0xff) << 8) | - i32In[3] & 0xff; + + var result = + ((i32In[0] & 0xff) << 24) | + ((i32In[1] & 0xff) << 16) | + ((i32In[2] & 0xff) << 8) | + i32In[3] & 0xff; + return result; } diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TCompactProtocol.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TCompactProtocol.cs index 6d5e0bf35..cecdf03cc 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TCompactProtocol.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TCompactProtocol.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for 
additional information // regarding copyright ownership.The ASF licenses this file @@ -114,7 +114,7 @@ namespace Thrift.Protocols /// use it as an opportunity to put special placeholder markers on the field /// stack so we can get the field id deltas correct. /// - public override async Task WriteStructBeginAsync(TStruct struc, CancellationToken cancellationToken) + public override async Task WriteStructBeginAsync(TStruct @struct, CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { @@ -383,16 +383,16 @@ namespace Thrift.Protocols await Trans.WriteAsync(bytes, 0, bytes.Length, cancellationToken); } - public override async Task WriteBinaryAsync(byte[] b, CancellationToken cancellationToken) + public override async Task WriteBinaryAsync(byte[] bytes, CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { return; } - var bufferTuple = CreateWriteVarInt32((uint) b.Length); + var bufferTuple = CreateWriteVarInt32((uint) bytes.Length); await Trans.WriteAsync(bufferTuple.Item1, 0, bufferTuple.Item2, cancellationToken); - await Trans.WriteAsync(b, 0, b.Length, cancellationToken); + await Trans.WriteAsync(bytes, 0, bytes.Length, cancellationToken); } public override async Task WriteMapBeginAsync(TMap map, CancellationToken cancellationToken) diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TJSONProtocol.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TJSONProtocol.cs index a4ddd5b28..6d33f029e 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TJSONProtocol.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TJSONProtocol.cs @@ -19,6 +19,7 @@ using System; using System.Collections.Generic; using System.Globalization; using System.IO; +using System.Linq; using System.Text; using System.Threading; using System.Threading.Tasks; @@ -28,8 +29,6 @@ using Thrift.Transports; namespace Thrift.Protocols { - //TODO: implementation 
of TProtocol - /// /// JSON protocol implementation for thrift. /// This is a full-featured protocol supporting Write and Read. @@ -42,53 +41,14 @@ namespace Thrift.Protocols { private const long Version = 1; - private const int DefStringSize = 16; - - private static readonly byte[] Comma = {(byte) ','}; - private static readonly byte[] Colon = {(byte) ':'}; - private static readonly byte[] Lbrace = {(byte) '{'}; - private static readonly byte[] Rbrace = {(byte) '}'}; - private static readonly byte[] Lbracket = {(byte) '['}; - private static readonly byte[] Rbracket = {(byte) ']'}; - private static readonly byte[] Quote = {(byte) '"'}; - private static readonly byte[] Backslash = {(byte) '\\'}; - - private static readonly byte[] NameBool = {(byte) 't', (byte) 'f'}; - private static readonly byte[] NameByte = {(byte) 'i', (byte) '8'}; - private static readonly byte[] NameI16 = {(byte) 'i', (byte) '1', (byte) '6'}; - private static readonly byte[] NameI32 = {(byte) 'i', (byte) '3', (byte) '2'}; - private static readonly byte[] NameI64 = {(byte) 'i', (byte) '6', (byte) '4'}; - private static readonly byte[] NameDouble = {(byte) 'd', (byte) 'b', (byte) 'l'}; - private static readonly byte[] NameStruct = {(byte) 'r', (byte) 'e', (byte) 'c'}; - private static readonly byte[] NameString = {(byte) 's', (byte) 't', (byte) 'r'}; - private static readonly byte[] NameMap = {(byte) 'm', (byte) 'a', (byte) 'p'}; - private static readonly byte[] NameList = {(byte) 'l', (byte) 's', (byte) 't'}; - private static readonly byte[] NameSet = {(byte) 's', (byte) 'e', (byte) 't'}; - - private readonly char[] _escapeChars = "\"\\/bfnrt".ToCharArray(); - - private readonly byte[] _escapeCharVals = - { - (byte) '"', (byte) '\\', (byte) '/', (byte) '\b', (byte) '\f', (byte) '\n', (byte) '\r', (byte) '\t' - }; - - private readonly byte[] _escseq = {(byte) '\\', (byte) 'u', (byte) '0', (byte) '0'}; - - private readonly byte[] _jsonCharTable = - { - 0, 0, 0, 0, 0, 0, 0, 0, (byte) 'b', (byte) 
't', (byte) 'n', 0, (byte) 'f', (byte) 'r', 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, (byte) '"', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 - }; - // Temporary buffer used by several methods private readonly byte[] _tempBuffer = new byte[4]; // Current context that we are in - protected JsonBaseContext Context; + protected JSONBaseContext Context; // Stack of nested contexts that we may be in - protected Stack ContextStack = new Stack(); + protected Stack ContextStack = new Stack(); // Reader that manages a 1-byte buffer protected LookaheadReader Reader; @@ -102,105 +62,14 @@ namespace Thrift.Protocols public TJsonProtocol(TClientTransport trans) : base(trans) { - //throw new NotImplementedException("TJsonProtocol is not fully ready for usage"); - - Context = new JsonBaseContext(this); + Context = new JSONBaseContext(this); Reader = new LookaheadReader(this); } - private static byte[] GetTypeNameForTypeId(TType typeId) - { - switch (typeId) - { - case TType.Bool: - return NameBool; - case TType.Byte: - return NameByte; - case TType.I16: - return NameI16; - case TType.I32: - return NameI32; - case TType.I64: - return NameI64; - case TType.Double: - return NameDouble; - case TType.String: - return NameString; - case TType.Struct: - return NameStruct; - case TType.Map: - return NameMap; - case TType.Set: - return NameSet; - case TType.List: - return NameList; - default: - throw new TProtocolException(TProtocolException.NOT_IMPLEMENTED, "Unrecognized exType"); - } - } - - private static TType GetTypeIdForTypeName(byte[] name) - { - var result = TType.Stop; - if (name.Length > 1) - { - switch (name[0]) - { - case (byte) 'd': - result = TType.Double; - break; - case (byte) 'i': - switch (name[1]) - { - case (byte) '8': - result = TType.Byte; - break; - case (byte) '1': - result = TType.I16; - break; - case (byte) '3': - result = TType.I32; - break; - case (byte) '6': - result = TType.I64; - break; - } - break; - case (byte) 'l': - result = TType.List; - 
break; - case (byte) 'm': - result = TType.Map; - break; - case (byte) 'r': - result = TType.Struct; - break; - case (byte) 's': - if (name[1] == (byte) 't') - { - result = TType.String; - } - else if (name[1] == (byte) 'e') - { - result = TType.Set; - } - break; - case (byte) 't': - result = TType.Bool; - break; - } - } - if (result == TType.Stop) - { - throw new TProtocolException(TProtocolException.NOT_IMPLEMENTED, "Unrecognized exType"); - } - return result; - } - /// /// Push a new JSON context onto the stack. /// - protected void PushContext(JsonBaseContext c) + protected void PushContext(JSONBaseContext c) { ContextStack.Push(Context); Context = c; @@ -219,94 +88,60 @@ namespace Thrift.Protocols /// Marked protected to avoid synthetic accessor in JSONListContext.Read /// and JSONPairContext.Read /// - protected async Task ReadJsonSyntaxCharAsync(byte[] b, CancellationToken cancellationToken) + protected async Task ReadJsonSyntaxCharAsync(byte[] bytes, CancellationToken cancellationToken) { var ch = await Reader.ReadAsync(cancellationToken); - if (ch != b[0]) + if (ch != bytes[0]) { throw new TProtocolException(TProtocolException.INVALID_DATA, $"Unexpected character: {(char) ch}"); } } - /// - /// Convert a byte containing a hex char ('0'-'9' or 'a'-'f') into its - /// corresponding hex value - /// - private static byte HexVal(byte ch) - { - if ((ch >= '0') && (ch <= '9')) - { - return (byte) ((char) ch - '0'); - } - - if ((ch >= 'a') && (ch <= 'f')) - { - ch += 10; - return (byte) ((char) ch - 'a'); - } - - throw new TProtocolException(TProtocolException.INVALID_DATA, "Expected hex character"); - } - - /// - /// Convert a byte containing a hex value to its corresponding hex character - /// - private static byte HexChar(byte val) - { - val &= 0x0F; - if (val < 10) - { - return (byte) ((char) val + '0'); - } - val -= 10; - return (byte) ((char) val + 'a'); - } - /// /// Write the bytes in array buf as a JSON characters, escaping as needed /// - private async 
Task WriteJsonStringAsync(byte[] b, CancellationToken cancellationToken) + private async Task WriteJsonStringAsync(byte[] bytes, CancellationToken cancellationToken) { await Context.WriteAsync(cancellationToken); - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); - var len = b.Length; + var len = bytes.Length; for (var i = 0; i < len; i++) { - if ((b[i] & 0x00FF) >= 0x30) + if ((bytes[i] & 0x00FF) >= 0x30) { - if (b[i] == Backslash[0]) + if (bytes[i] == TJSONProtocolConstants.Backslash[0]) { - await Trans.WriteAsync(Backslash, cancellationToken); - await Trans.WriteAsync(Backslash, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Backslash, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Backslash, cancellationToken); } else { - await Trans.WriteAsync(b, i, 1, cancellationToken); + await Trans.WriteAsync(bytes.ToArray(), i, 1, cancellationToken); } } else { - _tempBuffer[0] = _jsonCharTable[b[i]]; + _tempBuffer[0] = TJSONProtocolConstants.JsonCharTable[bytes[i]]; if (_tempBuffer[0] == 1) { - await Trans.WriteAsync(b, i, 1, cancellationToken); + await Trans.WriteAsync(bytes, i, 1, cancellationToken); } else if (_tempBuffer[0] > 1) { - await Trans.WriteAsync(Backslash, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Backslash, cancellationToken); await Trans.WriteAsync(_tempBuffer, 0, 1, cancellationToken); } else { - await Trans.WriteAsync(_escseq, cancellationToken); - _tempBuffer[0] = HexChar((byte) (b[i] >> 4)); - _tempBuffer[1] = HexChar(b[i]); + await Trans.WriteAsync(TJSONProtocolConstants.EscSequences, cancellationToken); + _tempBuffer[0] = TJSONProtocolHelper.ToHexChar((byte) (bytes[i] >> 4)); + _tempBuffer[1] = TJSONProtocolHelper.ToHexChar(bytes[i]); await Trans.WriteAsync(_tempBuffer, 0, 2, cancellationToken); } } } - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, 
cancellationToken); } /// @@ -321,14 +156,15 @@ namespace Thrift.Protocols var escapeNum = Context.EscapeNumbers(); if (escapeNum) { - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); } - await Trans.WriteAsync(Utf8Encoding.GetBytes(str), cancellationToken); + var bytes = Utf8Encoding.GetBytes(str); + await Trans.WriteAsync(bytes, cancellationToken); if (escapeNum) { - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); } } @@ -361,14 +197,14 @@ namespace Thrift.Protocols if (escapeNum) { - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); } await Trans.WriteAsync(Utf8Encoding.GetBytes(str), cancellationToken); if (escapeNum) { - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); } } @@ -376,26 +212,18 @@ namespace Thrift.Protocols /// Write out contents of byte array b as a JSON string with base-64 encoded /// data /// - private async Task WriteJsonBase64Async(byte[] b, CancellationToken cancellationToken) + private async Task WriteJsonBase64Async(byte[] bytes, CancellationToken cancellationToken) { await Context.WriteAsync(cancellationToken); - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); - var len = b.Length; + var len = bytes.Length; var off = 0; - // Ignore padding - var bound = len >= 2 ? 
len - 2 : 0; - - for (var i = len - 1; i >= bound && b[i] == '='; --i) - { - --len; - } - while (len >= 3) { // Encode 3 bytes at a time - TBase64Utils.Encode(b, off, 3, _tempBuffer, 0); + TBase64Helper.Encode(bytes, off, 3, _tempBuffer, 0); await Trans.WriteAsync(_tempBuffer, 0, 4, cancellationToken); off += 3; len -= 3; @@ -404,37 +232,37 @@ namespace Thrift.Protocols if (len > 0) { // Encode remainder - TBase64Utils.Encode(b, off, len, _tempBuffer, 0); + TBase64Helper.Encode(bytes, off, len, _tempBuffer, 0); await Trans.WriteAsync(_tempBuffer, 0, len + 1, cancellationToken); } - await Trans.WriteAsync(Quote, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.Quote, cancellationToken); } private async Task WriteJsonObjectStartAsync(CancellationToken cancellationToken) { await Context.WriteAsync(cancellationToken); - await Trans.WriteAsync(Lbrace, cancellationToken); - PushContext(new JsonPairContext(this)); + await Trans.WriteAsync(TJSONProtocolConstants.LeftBrace, cancellationToken); + PushContext(new JSONPairContext(this)); } private async Task WriteJsonObjectEndAsync(CancellationToken cancellationToken) { PopContext(); - await Trans.WriteAsync(Rbrace, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.RightBrace, cancellationToken); } private async Task WriteJsonArrayStartAsync(CancellationToken cancellationToken) { await Context.WriteAsync(cancellationToken); - await Trans.WriteAsync(Lbracket, cancellationToken); - PushContext(new JsonListContext(this)); + await Trans.WriteAsync(TJSONProtocolConstants.LeftBracket, cancellationToken); + PushContext(new JSONListContext(this)); } private async Task WriteJsonArrayEndAsync(CancellationToken cancellationToken) { PopContext(); - await Trans.WriteAsync(Rbracket, cancellationToken); + await Trans.WriteAsync(TJSONProtocolConstants.RightBracket, cancellationToken); } public override async Task WriteMessageBeginAsync(TMessage message, CancellationToken cancellationToken) @@ -454,7 +282,7 
@@ namespace Thrift.Protocols await WriteJsonArrayEndAsync(cancellationToken); } - public override async Task WriteStructBeginAsync(TStruct struc, CancellationToken cancellationToken) + public override async Task WriteStructBeginAsync(TStruct @struct, CancellationToken cancellationToken) { await WriteJsonObjectStartAsync(cancellationToken); } @@ -468,7 +296,7 @@ namespace Thrift.Protocols { await WriteJsonIntegerAsync(field.ID, cancellationToken); await WriteJsonObjectStartAsync(cancellationToken); - await WriteJsonStringAsync(GetTypeNameForTypeId(field.Type), cancellationToken); + await WriteJsonStringAsync(TJSONProtocolHelper.GetTypeNameForTypeId(field.Type), cancellationToken); } public override async Task WriteFieldEndAsync(CancellationToken cancellationToken) @@ -487,8 +315,8 @@ namespace Thrift.Protocols public override async Task WriteMapBeginAsync(TMap map, CancellationToken cancellationToken) { await WriteJsonArrayStartAsync(cancellationToken); - await WriteJsonStringAsync(GetTypeNameForTypeId(map.KeyType), cancellationToken); - await WriteJsonStringAsync(GetTypeNameForTypeId(map.ValueType), cancellationToken); + await WriteJsonStringAsync(TJSONProtocolHelper.GetTypeNameForTypeId(map.KeyType), cancellationToken); + await WriteJsonStringAsync(TJSONProtocolHelper.GetTypeNameForTypeId(map.ValueType), cancellationToken); await WriteJsonIntegerAsync(map.Count, cancellationToken); await WriteJsonObjectStartAsync(cancellationToken); } @@ -502,7 +330,7 @@ namespace Thrift.Protocols public override async Task WriteListBeginAsync(TList list, CancellationToken cancellationToken) { await WriteJsonArrayStartAsync(cancellationToken); - await WriteJsonStringAsync(GetTypeNameForTypeId(list.ElementType), cancellationToken); + await WriteJsonStringAsync(TJSONProtocolHelper.GetTypeNameForTypeId(list.ElementType), cancellationToken); await WriteJsonIntegerAsync(list.Count, cancellationToken); } @@ -514,7 +342,7 @@ namespace Thrift.Protocols public override async Task 
WriteSetBeginAsync(TSet set, CancellationToken cancellationToken) { await WriteJsonArrayStartAsync(cancellationToken); - await WriteJsonStringAsync(GetTypeNameForTypeId(set.ElementType), cancellationToken); + await WriteJsonStringAsync(TJSONProtocolHelper.GetTypeNameForTypeId(set.ElementType), cancellationToken); await WriteJsonIntegerAsync(set.Count, cancellationToken); } @@ -559,9 +387,9 @@ namespace Thrift.Protocols await WriteJsonStringAsync(b, cancellationToken); } - public override async Task WriteBinaryAsync(byte[] b, CancellationToken cancellationToken) + public override async Task WriteBinaryAsync(byte[] bytes, CancellationToken cancellationToken) { - await WriteJsonBase64Async(b, cancellationToken); + await WriteJsonBase64Async(bytes, cancellationToken); } /// @@ -580,18 +408,18 @@ namespace Thrift.Protocols await Context.ReadAsync(cancellationToken); } - await ReadJsonSyntaxCharAsync(Quote, cancellationToken); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.Quote, cancellationToken); while (true) { var ch = await Reader.ReadAsync(cancellationToken); - if (ch == Quote[0]) + if (ch == TJSONProtocolConstants.Quote[0]) { break; } // escaped? - if (ch != _escseq[0]) + if (ch != TJSONProtocolConstants.EscSequences[0]) { await buffer.WriteAsync(new[] {ch}, 0, 1, cancellationToken); continue; @@ -599,26 +427,25 @@ namespace Thrift.Protocols // distinguish between \uXXXX and \? 
ch = await Reader.ReadAsync(cancellationToken); - if (ch != _escseq[1]) // control chars like \n + if (ch != TJSONProtocolConstants.EscSequences[1]) // control chars like \n { - var off = Array.IndexOf(_escapeChars, (char) ch); + var off = Array.IndexOf(TJSONProtocolConstants.EscapeChars, (char) ch); if (off == -1) { throw new TProtocolException(TProtocolException.INVALID_DATA, "Expected control char"); } - ch = _escapeCharVals[off]; + ch = TJSONProtocolConstants.EscapeCharValues[off]; await buffer.WriteAsync(new[] {ch}, 0, 1, cancellationToken); continue; } - // it's \uXXXX await Trans.ReadAllAsync(_tempBuffer, 0, 4, cancellationToken); - var wch = (short) ((HexVal(_tempBuffer[0]) << 12) + - (HexVal(_tempBuffer[1]) << 8) + - (HexVal(_tempBuffer[2]) << 4) + - HexVal(_tempBuffer[3])); + var wch = (short) ((TJSONProtocolHelper.ToHexVal(_tempBuffer[0]) << 12) + + (TJSONProtocolHelper.ToHexVal(_tempBuffer[1]) << 8) + + (TJSONProtocolHelper.ToHexVal(_tempBuffer[2]) << 4) + + TJSONProtocolHelper.ToHexVal(_tempBuffer[3])); if (char.IsHighSurrogate((char) wch)) { @@ -656,34 +483,6 @@ namespace Thrift.Protocols } } - /// - /// Return true if the given byte could be a valid part of a JSON number. - /// - private static bool IsJsonNumeric(byte b) - { - switch (b) - { - case (byte) '+': - case (byte) '-': - case (byte) '.': - case (byte) '0': - case (byte) '1': - case (byte) '2': - case (byte) '3': - case (byte) '4': - case (byte) '5': - case (byte) '6': - case (byte) '7': - case (byte) '8': - case (byte) '9': - case (byte) 'E': - case (byte) 'e': - return true; - } - - return false; - } - /// /// Read in a sequence of characters that are all valid in JSON numbers. Does /// not do a complete regex check to validate that this is actually a number. 
@@ -693,12 +492,21 @@ namespace Thrift.Protocols var strbld = new StringBuilder(); while (true) { - var ch = await Reader.PeekAsync(cancellationToken); - if (!IsJsonNumeric(ch)) + //TODO: workaround for primitive types with TJsonProtocol, think - how to rewrite into more easy form without exceptions + try + { + var ch = await Reader.PeekAsync(cancellationToken); + if (!TJSONProtocolHelper.IsJsonNumeric(ch)) + { + break; + } + var c = (char)await Reader.ReadAsync(cancellationToken); + strbld.Append(c); + } + catch (TTransportException) { break; } - strbld.Append((char) await Reader.ReadAsync(cancellationToken)); } return strbld.ToString(); } @@ -711,13 +519,13 @@ namespace Thrift.Protocols await Context.ReadAsync(cancellationToken); if (Context.EscapeNumbers()) { - await ReadJsonSyntaxCharAsync(Quote, cancellationToken); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.Quote, cancellationToken); } var str = await ReadJsonNumericCharsAsync(cancellationToken); if (Context.EscapeNumbers()) { - await ReadJsonSyntaxCharAsync(Quote, cancellationToken); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.Quote, cancellationToken); } try @@ -737,7 +545,7 @@ namespace Thrift.Protocols private async Task ReadJsonDoubleAsync(CancellationToken cancellationToken) { await Context.ReadAsync(cancellationToken); - if (await Reader.PeekAsync(cancellationToken) == Quote[0]) + if (await Reader.PeekAsync(cancellationToken) == TJSONProtocolConstants.Quote[0]) { var arr = await ReadJsonStringAsync(true, cancellationToken); var dub = double.Parse(Utf8Encoding.GetString(arr, 0, arr.Length), CultureInfo.InvariantCulture); @@ -754,7 +562,7 @@ namespace Thrift.Protocols if (Context.EscapeNumbers()) { // This will throw - we should have had a quote if escapeNum == true - await ReadJsonSyntaxCharAsync(Quote, cancellationToken); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.Quote, cancellationToken); } try @@ -787,7 +595,7 @@ namespace Thrift.Protocols while (len > 4) { // 
Decode 4 bytes at a time - TBase64Utils.Decode(b, off, 4, b, size); // NB: decoded in place + TBase64Helper.Decode(b, off, 4, b, size); // NB: decoded in place off += 4; len -= 4; size += 3; @@ -798,7 +606,7 @@ namespace Thrift.Protocols if (len > 1) { // Decode remainder - TBase64Utils.Decode(b, off, len, b, size); // NB: decoded in place + TBase64Helper.Decode(b, off, len, b, size); // NB: decoded in place size += len - 1; } @@ -811,26 +619,26 @@ namespace Thrift.Protocols private async Task ReadJsonObjectStartAsync(CancellationToken cancellationToken) { await Context.ReadAsync(cancellationToken); - await ReadJsonSyntaxCharAsync(Lbrace, cancellationToken); - PushContext(new JsonPairContext(this)); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.LeftBrace, cancellationToken); + PushContext(new JSONPairContext(this)); } private async Task ReadJsonObjectEndAsync(CancellationToken cancellationToken) { - await ReadJsonSyntaxCharAsync(Rbrace, cancellationToken); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.RightBrace, cancellationToken); PopContext(); } private async Task ReadJsonArrayStartAsync(CancellationToken cancellationToken) { await Context.ReadAsync(cancellationToken); - await ReadJsonSyntaxCharAsync(Lbracket, cancellationToken); - PushContext(new JsonListContext(this)); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.LeftBracket, cancellationToken); + PushContext(new JSONListContext(this)); } private async Task ReadJsonArrayEndAsync(CancellationToken cancellationToken) { - await ReadJsonSyntaxCharAsync(Rbracket, cancellationToken); + await ReadJsonSyntaxCharAsync(TJSONProtocolConstants.RightBracket, cancellationToken); PopContext(); } @@ -870,7 +678,7 @@ namespace Thrift.Protocols { var field = new TField(); var ch = await Reader.PeekAsync(cancellationToken); - if (ch == Rbrace[0]) + if (ch == TJSONProtocolConstants.RightBrace[0]) { field.Type = TType.Stop; } @@ -878,7 +686,7 @@ namespace Thrift.Protocols { field.ID = (short) await 
ReadJsonIntegerAsync(cancellationToken); await ReadJsonObjectStartAsync(cancellationToken); - field.Type = GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); + field.Type = TJSONProtocolHelper.GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); } return field; } @@ -892,8 +700,8 @@ namespace Thrift.Protocols { var map = new TMap(); await ReadJsonArrayStartAsync(cancellationToken); - map.KeyType = GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); - map.ValueType = GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); + map.KeyType = TJSONProtocolHelper.GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); + map.ValueType = TJSONProtocolHelper.GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); map.Count = (int) await ReadJsonIntegerAsync(cancellationToken); await ReadJsonObjectStartAsync(cancellationToken); return map; @@ -909,7 +717,7 @@ namespace Thrift.Protocols { var list = new TList(); await ReadJsonArrayStartAsync(cancellationToken); - list.ElementType = GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); + list.ElementType = TJSONProtocolHelper.GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); list.Count = (int) await ReadJsonIntegerAsync(cancellationToken); return list; } @@ -923,7 +731,7 @@ namespace Thrift.Protocols { var set = new TSet(); await ReadJsonArrayStartAsync(cancellationToken); - set.ElementType = GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); + set.ElementType = TJSONProtocolHelper.GetTypeIdForTypeName(await ReadJsonStringAsync(false, cancellationToken)); set.Count = (int) await ReadJsonIntegerAsync(cancellationToken); return set; } @@ -990,11 +798,11 @@ namespace Thrift.Protocols /// inserting/Reading additional JSON syntax characters /// This base context does nothing. 
/// - protected class JsonBaseContext + protected class JSONBaseContext { protected TJsonProtocol Proto; - public JsonBaseContext(TJsonProtocol proto) + public JSONBaseContext(TJsonProtocol proto) { Proto = proto; } @@ -1025,11 +833,11 @@ namespace Thrift.Protocols /// Context for JSON lists. Will insert/Read commas before each item except /// for the first one /// - protected class JsonListContext : JsonBaseContext + protected class JSONListContext : JSONBaseContext { private bool _first = true; - public JsonListContext(TJsonProtocol protocol) + public JSONListContext(TJsonProtocol protocol) : base(protocol) { } @@ -1042,7 +850,7 @@ namespace Thrift.Protocols } else { - await Proto.Trans.WriteAsync(Comma, cancellationToken); + await Proto.Trans.WriteAsync(TJSONProtocolConstants.Comma, cancellationToken); } } @@ -1054,7 +862,7 @@ namespace Thrift.Protocols } else { - await Proto.ReadJsonSyntaxCharAsync(Comma, cancellationToken); + await Proto.ReadJsonSyntaxCharAsync(TJSONProtocolConstants.Comma, cancellationToken); } } } @@ -1065,13 +873,14 @@ namespace Thrift.Protocols /// addition, will indicate that numbers in the key position need to be /// escaped in quotes (since JSON keys must be strings). /// - protected class JsonPairContext : JsonBaseContext + // ReSharper disable once InconsistentNaming + protected class JSONPairContext : JSONBaseContext { private bool _colon = true; private bool _first = true; - public JsonPairContext(TJsonProtocol proto) + public JSONPairContext(TJsonProtocol proto) : base(proto) { } @@ -1085,7 +894,7 @@ namespace Thrift.Protocols } else { - await Proto.Trans.WriteAsync(_colon ? Colon : Comma, cancellationToken); + await Proto.Trans.WriteAsync(_colon ? TJSONProtocolConstants.Colon : TJSONProtocolConstants.Comma, cancellationToken); _colon = !_colon; } } @@ -1099,7 +908,7 @@ namespace Thrift.Protocols } else { - await Proto.ReadJsonSyntaxCharAsync(_colon ? Colon : Comma, cancellationToken); + await Proto.ReadJsonSyntaxCharAsync(_colon ? 
TJSONProtocolConstants.Colon : TJSONProtocolConstants.Comma, cancellationToken); _colon = !_colon; } } @@ -1142,6 +951,7 @@ namespace Thrift.Protocols } else { + // find more easy way to avoid exception on reading primitive types await Proto.Trans.ReadAllAsync(_data, 0, 1, cancellationToken); } return _data[0]; @@ -1160,6 +970,7 @@ namespace Thrift.Protocols if (!_hasData) { + // find more easy way to avoid exception on reading primitive types await Proto.Trans.ReadAllAsync(_data, 0, 1, cancellationToken); } _hasData = true; @@ -1167,4 +978,4 @@ namespace Thrift.Protocols } } } -} \ No newline at end of file +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TMultiplexedProtocol.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TMultiplexedProtocol.cs index 5b2202e25..367e4e644 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TMultiplexedProtocol.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TMultiplexedProtocol.cs @@ -74,22 +74,13 @@ namespace Thrift.Protocols _serviceName = serviceName; } - /** - * Prepends the service name to the function name, separated by TMultiplexedProtocol.SEPARATOR. - * Args: - * tMessage The original message. 
- */ - public override async Task WriteMessageBeginAsync(TMessage message, CancellationToken cancellationToken) { switch (message.Type) { case TMessageType.Call: case TMessageType.Oneway: - await - base.WriteMessageBeginAsync( - new TMessage($"{_serviceName}{Separator}{message.Name}", message.Type, message.SeqID), - cancellationToken); + await base.WriteMessageBeginAsync(new TMessage($"{_serviceName}{Separator}{message.Name}", message.Type, message.SeqID), cancellationToken); break; default: await base.WriteMessageBeginAsync(message, cancellationToken); @@ -97,4 +88,4 @@ namespace Thrift.Protocols } } } -} \ No newline at end of file +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocol.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocol.cs index 8fef8613b..91e009d63 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocol.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocol.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -27,7 +27,7 @@ namespace Thrift.Protocols // ReSharper disable once InconsistentNaming public abstract class TProtocol : IDisposable { - private const int DefaultRecursionDepth = 64; + public const int DefaultRecursionDepth = 64; private bool _isDisposed; protected int RecursionDepth; @@ -42,7 +42,6 @@ namespace Thrift.Protocols public TClientTransport Transport => Trans; - //TODO: check for protected protected int RecursionLimit { get; set; } public void Dispose() @@ -93,12 +92,12 @@ namespace Thrift.Protocols public abstract Task WriteMessageEndAsync(CancellationToken cancellationToken); - public virtual async Task WriteStructBeginAsync(TStruct struc) + public 
virtual async Task WriteStructBeginAsync(TStruct @struct) { - await WriteStructBeginAsync(struc, CancellationToken.None); + await WriteStructBeginAsync(@struct, CancellationToken.None); } - public abstract Task WriteStructBeginAsync(TStruct struc, CancellationToken cancellationToken); + public abstract Task WriteStructBeginAsync(TStruct @struct, CancellationToken cancellationToken); public virtual async Task WriteStructEndAsync() { @@ -223,12 +222,12 @@ namespace Thrift.Protocols await WriteBinaryAsync(bytes, cancellationToken); } - public virtual async Task WriteBinaryAsync(byte[] b) + public virtual async Task WriteBinaryAsync(byte[] bytes) { - await WriteBinaryAsync(b, CancellationToken.None); + await WriteBinaryAsync(bytes, CancellationToken.None); } - public abstract Task WriteBinaryAsync(byte[] b, CancellationToken cancellationToken); + public abstract Task WriteBinaryAsync(byte[] bytes, CancellationToken cancellationToken); public virtual async Task ReadMessageBeginAsync() { diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolDecorator.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolDecorator.cs index 458b1172a..3222754a8 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolDecorator.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolDecorator.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -36,12 +36,7 @@ namespace Thrift.Protocols protected TProtocolDecorator(TProtocol protocol) : base(protocol.Transport) { - if (protocol == null) - { - throw new ArgumentNullException(nameof(protocol)); - } - - _wrappedProtocol = protocol; + _wrappedProtocol = protocol ?? 
throw new ArgumentNullException(nameof(protocol)); } public override async Task WriteMessageBeginAsync(TMessage message, CancellationToken cancellationToken) @@ -54,9 +49,9 @@ namespace Thrift.Protocols await _wrappedProtocol.WriteMessageEndAsync(cancellationToken); } - public override async Task WriteStructBeginAsync(TStruct struc, CancellationToken cancellationToken) + public override async Task WriteStructBeginAsync(TStruct @struct, CancellationToken cancellationToken) { - await _wrappedProtocol.WriteStructBeginAsync(struc, cancellationToken); + await _wrappedProtocol.WriteStructBeginAsync(@struct, cancellationToken); } public override async Task WriteStructEndAsync(CancellationToken cancellationToken) @@ -144,9 +139,9 @@ namespace Thrift.Protocols await _wrappedProtocol.WriteStringAsync(s, cancellationToken); } - public override async Task WriteBinaryAsync(byte[] b, CancellationToken cancellationToken) + public override async Task WriteBinaryAsync(byte[] bytes, CancellationToken cancellationToken) { - await _wrappedProtocol.WriteBinaryAsync(b, cancellationToken); + await _wrappedProtocol.WriteBinaryAsync(bytes, cancellationToken); } public override async Task ReadMessageBeginAsync(CancellationToken cancellationToken) diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolException.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolException.cs index 02d0d3f31..8c67c3bfd 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolException.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/TProtocolException.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -15,11 +15,12 @@ // specific language 
governing permissions and limitations // under the License. +// ReSharper disable InconsistentNaming namespace Thrift.Protocols { public class TProtocolException : TException { - // do not rename public contants - they used in generated files + // do not rename public constants - they used in generated files public const int UNKNOWN = 0; public const int INVALID_DATA = 1; public const int NEGATIVE_SIZE = 2; diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TBase64Helper.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TBase64Helper.cs new file mode 100644 index 000000000..7eff5e181 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TBase64Helper.cs @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +using System; + +namespace Thrift.Protocols.Utilities +{ + // ReSharper disable once InconsistentNaming + internal static class TBase64Helper + { + //TODO: Constants + //TODO: Check for args + //TODO: Unitests + + internal const string EncodeTable = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + private static readonly int[] DecodeTable = + { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, + -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, + -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 + }; + + internal static void Encode(byte[] src, int srcOff, int len, byte[] dst, int dstOff) + { + if (src == null) + { + throw new ArgumentNullException(nameof(src)); + } + + dst[dstOff] = (byte) EncodeTable[(src[srcOff] >> 2) & 0x3F]; + + if (len == 3) + { + dst[dstOff + 1] = (byte) EncodeTable[((src[srcOff] << 4) & 0x30) | ((src[srcOff + 1] >> 4) & 0x0F)]; + dst[dstOff + 2] = (byte) EncodeTable[((src[srcOff + 1] << 2) & 0x3C) | ((src[srcOff + 2] >> 6) & 0x03)]; + dst[dstOff + 3] = (byte) EncodeTable[src[srcOff + 2] & 0x3F]; + } + else if (len == 2) + { + dst[dstOff + 1] 
= (byte) EncodeTable[((src[srcOff] << 4) & 0x30) | ((src[srcOff + 1] >> 4) & 0x0F)]; + dst[dstOff + 2] = (byte) EncodeTable[(src[srcOff + 1] << 2) & 0x3C]; + } + else + { + // len == 1 + dst[dstOff + 1] = (byte) EncodeTable[(src[srcOff] << 4) & 0x30]; + } + } + + internal static void Decode(byte[] src, int srcOff, int len, byte[] dst, int dstOff) + { + if (src == null) + { + throw new ArgumentNullException(nameof(src)); + } + + dst[dstOff] = (byte) ((DecodeTable[src[srcOff] & 0x0FF] << 2) | (DecodeTable[src[srcOff + 1] & 0x0FF] >> 4)); + + if (len > 2) + { + dst[dstOff + 1] = + (byte) + (((DecodeTable[src[srcOff + 1] & 0x0FF] << 4) & 0xF0) | (DecodeTable[src[srcOff + 2] & 0x0FF] >> 2)); + if (len > 3) + { + dst[dstOff + 2] = + (byte) + (((DecodeTable[src[srcOff + 2] & 0x0FF] << 6) & 0xC0) | DecodeTable[src[srcOff + 3] & 0x0FF]); + } + } + } + } +} \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolConstants.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolConstants.cs new file mode 100644 index 000000000..93eff7855 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolConstants.cs @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +namespace Thrift.Protocols.Utilities +{ + // ReSharper disable once InconsistentNaming + public static class TJSONProtocolConstants + { + //TODO Check for performance for reusing ImmutableArray from System.Collections.Immutable (https://blogs.msdn.microsoft.com/dotnet/2013/06/24/please-welcome-immutablearrayt/) + // can be possible to get better performance and also better GC + + public static readonly byte[] Comma = {(byte) ','}; + public static readonly byte[] Colon = {(byte) ':'}; + public static readonly byte[] LeftBrace = {(byte) '{'}; + public static readonly byte[] RightBrace = {(byte) '}'}; + public static readonly byte[] LeftBracket = {(byte) '['}; + public static readonly byte[] RightBracket = {(byte) ']'}; + public static readonly byte[] Quote = {(byte) '"'}; + public static readonly byte[] Backslash = {(byte) '\\'}; + + public static readonly byte[] JsonCharTable = + { + 0, 0, 0, 0, 0, 0, 0, 0, (byte) 'b', (byte) 't', (byte) 'n', 0, (byte) 'f', (byte) 'r', 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, (byte) '"', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + }; + + public static readonly char[] EscapeChars = "\"\\/bfnrt".ToCharArray(); + public static readonly byte[] EscapeCharValues = {(byte) '"', (byte) '\\', (byte) '/', (byte) '\b', (byte) '\f', (byte) '\n', (byte) '\r', (byte) '\t'}; + public static readonly byte[] EscSequences = {(byte) '\\', (byte) 'u', (byte) '0', (byte) '0'}; + + public static class TypeNames + { + public static readonly byte[] NameBool = { (byte)'t', (byte)'f' }; + public static readonly byte[] NameByte = { (byte)'i', (byte)'8' }; + public static readonly byte[] NameI16 = { (byte)'i', (byte)'1', (byte)'6' }; + public static readonly byte[] NameI32 = { (byte)'i', (byte)'3', (byte)'2' }; + public static readonly byte[] NameI64 = { (byte)'i', (byte)'6', (byte)'4' }; + public static readonly byte[] NameDouble = 
{ (byte)'d', (byte)'b', (byte)'l' }; + public static readonly byte[] NameStruct = { (byte)'r', (byte)'e', (byte)'c' }; + public static readonly byte[] NameString = { (byte)'s', (byte)'t', (byte)'r' }; + public static readonly byte[] NameMap = { (byte)'m', (byte)'a', (byte)'p' }; + public static readonly byte[] NameList = { (byte)'l', (byte)'s', (byte)'t' }; + public static readonly byte[] NameSet = { (byte)'s', (byte)'e', (byte)'t' }; + } + } +} \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolHelper.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolHelper.cs new file mode 100644 index 000000000..adc26a9af --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TJsonProtocolHelper.cs @@ -0,0 +1,176 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +using Thrift.Protocols.Entities; + +namespace Thrift.Protocols.Utilities +{ + // ReSharper disable once InconsistentNaming + public static class TJSONProtocolHelper + { + public static byte[] GetTypeNameForTypeId(TType typeId) + { + switch (typeId) + { + case TType.Bool: + return TJSONProtocolConstants.TypeNames.NameBool; + case TType.Byte: + return TJSONProtocolConstants.TypeNames.NameByte; + case TType.I16: + return TJSONProtocolConstants.TypeNames.NameI16; + case TType.I32: + return TJSONProtocolConstants.TypeNames.NameI32; + case TType.I64: + return TJSONProtocolConstants.TypeNames.NameI64; + case TType.Double: + return TJSONProtocolConstants.TypeNames.NameDouble; + case TType.String: + return TJSONProtocolConstants.TypeNames.NameString; + case TType.Struct: + return TJSONProtocolConstants.TypeNames.NameStruct; + case TType.Map: + return TJSONProtocolConstants.TypeNames.NameMap; + case TType.Set: + return TJSONProtocolConstants.TypeNames.NameSet; + case TType.List: + return TJSONProtocolConstants.TypeNames.NameList; + default: + throw new TProtocolException(TProtocolException.NOT_IMPLEMENTED, "Unrecognized exType"); + } + } + + public static TType GetTypeIdForTypeName(byte[] name) + { + var result = TType.Stop; + if (name.Length > 1) + { + switch (name[0]) + { + case (byte) 'd': + result = TType.Double; + break; + case (byte) 'i': + switch (name[1]) + { + case (byte) '8': + result = TType.Byte; + break; + case (byte) '1': + result = TType.I16; + break; + case (byte) '3': + result = TType.I32; + break; + case (byte) '6': + result = TType.I64; + break; + } + break; + case (byte) 'l': + result = TType.List; + break; + case (byte) 'm': + result = TType.Map; + break; + case (byte) 'r': + result = TType.Struct; + break; + case (byte) 's': + if (name[1] == (byte) 't') + { + result = TType.String; + } + else if (name[1] == (byte) 'e') + { + result = TType.Set; + } + break; + case (byte) 't': + result = TType.Bool; + break; + } + } + if (result == TType.Stop) + { + 
throw new TProtocolException(TProtocolException.NOT_IMPLEMENTED, "Unrecognized exType"); + } + return result; + } + + /// + /// Return true if the given byte could be a valid part of a JSON number. + /// + public static bool IsJsonNumeric(byte b) + { + switch (b) + { + case (byte)'+': + case (byte)'-': + case (byte)'.': + case (byte)'0': + case (byte)'1': + case (byte)'2': + case (byte)'3': + case (byte)'4': + case (byte)'5': + case (byte)'6': + case (byte)'7': + case (byte)'8': + case (byte)'9': + case (byte)'E': + case (byte)'e': + return true; + default: + return false; + } + } + + /// + /// Convert a byte containing a hex char ('0'-'9' or 'a'-'f') into its + /// corresponding hex value + /// + public static byte ToHexVal(byte ch) + { + if (ch >= '0' && ch <= '9') + { + return (byte)((char)ch - '0'); + } + + if (ch >= 'a' && ch <= 'f') + { + ch += 10; + return (byte)((char)ch - 'a'); + } + + throw new TProtocolException(TProtocolException.INVALID_DATA, "Expected hex character"); + } + + /// + /// Convert a byte containing a hex value to its corresponding hex character + /// + public static byte ToHexChar(byte val) + { + val &= 0x0F; + if (val < 10) + { + return (byte)((char)val + '0'); + } + val -= 10; + return (byte)((char)val + 'a'); + } + } +} \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TProtocolUtil.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TProtocolUtil.cs index 038edb9df..50b038566 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TProtocolUtil.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Protocols/Utilities/TProtocolUtil.cs @@ -19,83 +19,83 @@ using System.Threading; using System.Threading.Tasks; using Thrift.Protocols.Entities; -namespace Thrift.Protocols +namespace Thrift.Protocols.Utilities { // ReSharper disable once InconsistentNaming public static class TProtocolUtil { - public static async Task 
SkipAsync(TProtocol prot, TType type, CancellationToken cancellationToken) + public static async Task SkipAsync(TProtocol protocol, TType type, CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { await Task.FromCanceled(cancellationToken); } - prot.IncrementRecursionDepth(); + protocol.IncrementRecursionDepth(); try { switch (type) { case TType.Bool: - await prot.ReadBoolAsync(cancellationToken); + await protocol.ReadBoolAsync(cancellationToken); break; case TType.Byte: - await prot.ReadByteAsync(cancellationToken); + await protocol.ReadByteAsync(cancellationToken); break; case TType.I16: - await prot.ReadI16Async(cancellationToken); + await protocol.ReadI16Async(cancellationToken); break; case TType.I32: - await prot.ReadI32Async(cancellationToken); + await protocol.ReadI32Async(cancellationToken); break; case TType.I64: - await prot.ReadI64Async(cancellationToken); + await protocol.ReadI64Async(cancellationToken); break; case TType.Double: - await prot.ReadDoubleAsync(cancellationToken); + await protocol.ReadDoubleAsync(cancellationToken); break; case TType.String: // Don't try to decode the string, just skip it. 
- await prot.ReadBinaryAsync(cancellationToken); + await protocol.ReadBinaryAsync(cancellationToken); break; case TType.Struct: - await prot.ReadStructBeginAsync(cancellationToken); + await protocol.ReadStructBeginAsync(cancellationToken); while (true) { - var field = await prot.ReadFieldBeginAsync(cancellationToken); + var field = await protocol.ReadFieldBeginAsync(cancellationToken); if (field.Type == TType.Stop) { break; } - await SkipAsync(prot, field.Type, cancellationToken); - await prot.ReadFieldEndAsync(cancellationToken); + await SkipAsync(protocol, field.Type, cancellationToken); + await protocol.ReadFieldEndAsync(cancellationToken); } - await prot.ReadStructEndAsync(cancellationToken); + await protocol.ReadStructEndAsync(cancellationToken); break; case TType.Map: - var map = await prot.ReadMapBeginAsync(cancellationToken); + var map = await protocol.ReadMapBeginAsync(cancellationToken); for (var i = 0; i < map.Count; i++) { - await SkipAsync(prot, map.KeyType, cancellationToken); - await SkipAsync(prot, map.ValueType, cancellationToken); + await SkipAsync(protocol, map.KeyType, cancellationToken); + await SkipAsync(protocol, map.ValueType, cancellationToken); } - await prot.ReadMapEndAsync(cancellationToken); + await protocol.ReadMapEndAsync(cancellationToken); break; case TType.Set: - var set = await prot.ReadSetBeginAsync(cancellationToken); + var set = await protocol.ReadSetBeginAsync(cancellationToken); for (var i = 0; i < set.Count; i++) { - await SkipAsync(prot, set.ElementType, cancellationToken); + await SkipAsync(protocol, set.ElementType, cancellationToken); } - await prot.ReadSetEndAsync(cancellationToken); + await protocol.ReadSetEndAsync(cancellationToken); break; case TType.List: - var list = await prot.ReadListBeginAsync(cancellationToken); + var list = await protocol.ReadListBeginAsync(cancellationToken); for (var i = 0; i < list.Count; i++) { - await SkipAsync(prot, list.ElementType, cancellationToken); + await SkipAsync(protocol, 
list.ElementType, cancellationToken); } - await prot.ReadListEndAsync(cancellationToken); + await protocol.ReadListEndAsync(cancellationToken); break; default: throw new TProtocolException(TProtocolException.INVALID_DATA, "Unknown data type " + type.ToString("d")); @@ -103,8 +103,8 @@ namespace Thrift.Protocols } finally { - prot.DecrementRecursionDepth(); + protocol.DecrementRecursionDepth(); } } } -} \ No newline at end of file +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/AsyncBaseServer.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/AsyncBaseServer.cs index 5ff7a32a2..325c39c71 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/AsyncBaseServer.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/AsyncBaseServer.cs @@ -57,8 +57,7 @@ namespace Thrift.Server try { // cancelation token - _serverTask = Task.Factory.StartNew(() => StartListening(cancellationToken), - TaskCreationOptions.LongRunning); + _serverTask = Task.Factory.StartNew(() => StartListening(cancellationToken), TaskCreationOptions.LongRunning); await _serverTask; } catch (Exception ex) @@ -87,7 +86,7 @@ namespace Thrift.Server try { var client = await ServerTransport.AcceptAsync(cancellationToken); - await Task.Factory.StartNew(() => Execute(client, cancellationToken)); + await Task.Factory.StartNew(() => Execute(client, cancellationToken), cancellationToken); } catch (TTransportException ttx) { @@ -101,7 +100,11 @@ namespace Thrift.Server } else { - await Task.Delay(TimeSpan.FromMilliseconds(_clientWaitingDelay), cancellationToken); + try + { + await Task.Delay(TimeSpan.FromMilliseconds(_clientWaitingDelay), cancellationToken); + } + catch(TaskCanceledException) { } } } @@ -136,8 +139,7 @@ namespace Thrift.Server if (ServerEventHandler != null) { - connectionContext = - await ServerEventHandler.CreateContextAsync(inputProtocol, outputProtocol, cancellationToken); + connectionContext = await 
ServerEventHandler.CreateContextAsync(inputProtocol, outputProtocol, cancellationToken); } while (!cancellationToken.IsCancellationRequested) @@ -149,8 +151,7 @@ namespace Thrift.Server if (ServerEventHandler != null) { - await - ServerEventHandler.ProcessContextAsync(connectionContext, inputTransport, cancellationToken); + await ServerEventHandler.ProcessContextAsync(connectionContext, inputTransport, cancellationToken); } if (!await processor.ProcessAsync(inputProtocol, outputProtocol, cancellationToken)) @@ -170,9 +171,7 @@ namespace Thrift.Server if (ServerEventHandler != null) { - await - ServerEventHandler.DeleteContextAsync(connectionContext, inputProtocol, outputProtocol, - cancellationToken); + await ServerEventHandler.DeleteContextAsync(connectionContext, inputProtocol, outputProtocol, cancellationToken); } inputTransport?.Close(); @@ -181,4 +180,4 @@ namespace Thrift.Server Logger.LogTrace("Completed client request processing"); } } -} \ No newline at end of file +} diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/TBaseServer.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/TBaseServer.cs index 97cc7ff9e..741dd5c95 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/TBaseServer.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Server/TBaseServer.cs @@ -42,20 +42,13 @@ namespace Thrift.Server ITProtocolFactory inputProtocolFactory, ITProtocolFactory outputProtocolFactory, ILogger logger) { - if (itProcessorFactory == null) throw new ArgumentNullException(nameof(itProcessorFactory)); - if (inputTransportFactory == null) throw new ArgumentNullException(nameof(inputTransportFactory)); - if (outputTransportFactory == null) throw new ArgumentNullException(nameof(outputTransportFactory)); - if (inputProtocolFactory == null) throw new ArgumentNullException(nameof(inputProtocolFactory)); - if (outputProtocolFactory == null) throw new ArgumentNullException(nameof(outputProtocolFactory)); - if 
(logger == null) throw new ArgumentNullException(nameof(logger)); - - ItProcessorFactory = itProcessorFactory; + ItProcessorFactory = itProcessorFactory ?? throw new ArgumentNullException(nameof(itProcessorFactory)); ServerTransport = serverTransport; - InputTransportFactory = inputTransportFactory; - OutputTransportFactory = outputTransportFactory; - InputProtocolFactory = inputProtocolFactory; - OutputProtocolFactory = outputProtocolFactory; - Logger = logger; + InputTransportFactory = inputTransportFactory ?? throw new ArgumentNullException(nameof(inputTransportFactory)); + OutputTransportFactory = outputTransportFactory ?? throw new ArgumentNullException(nameof(outputTransportFactory)); + InputProtocolFactory = inputProtocolFactory ?? throw new ArgumentNullException(nameof(inputProtocolFactory)); + OutputProtocolFactory = outputProtocolFactory ?? throw new ArgumentNullException(nameof(outputProtocolFactory)); + Logger = logger ?? throw new ArgumentNullException(nameof(logger)); } public void SetEventHandler(TServerEventHandler seh) diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TApplicationException.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TApplicationException.cs index f1ea25258..9ec145a85 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TApplicationException.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TApplicationException.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -19,6 +19,7 @@ using System.Threading; using System.Threading.Tasks; using Thrift.Protocols; using Thrift.Protocols.Entities; +using Thrift.Protocols.Utilities; namespace Thrift { @@ -60,15 +61,15 @@ namespace Thrift Type = type; } - public static async 
Task ReadAsync(TProtocol iprot, CancellationToken cancellationToken) + public static async Task ReadAsync(TProtocol inputProtocol, CancellationToken cancellationToken) { string message = null; var type = ExceptionType.Unknown; - await iprot.ReadStructBeginAsync(cancellationToken); + await inputProtocol.ReadStructBeginAsync(cancellationToken); while (true) { - var field = await iprot.ReadFieldBeginAsync(cancellationToken); + var field = await inputProtocol.ReadFieldBeginAsync(cancellationToken); if (field.Type == TType.Stop) { break; @@ -79,37 +80,37 @@ namespace Thrift case MessageTypeFieldId: if (field.Type == TType.String) { - message = await iprot.ReadStringAsync(cancellationToken); + message = await inputProtocol.ReadStringAsync(cancellationToken); } else { - await TProtocolUtil.SkipAsync(iprot, field.Type, cancellationToken); + await TProtocolUtil.SkipAsync(inputProtocol, field.Type, cancellationToken); } break; case ExTypeFieldId: if (field.Type == TType.I32) { - type = (ExceptionType) await iprot.ReadI32Async(cancellationToken); + type = (ExceptionType) await inputProtocol.ReadI32Async(cancellationToken); } else { - await TProtocolUtil.SkipAsync(iprot, field.Type, cancellationToken); + await TProtocolUtil.SkipAsync(inputProtocol, field.Type, cancellationToken); } break; default: - await TProtocolUtil.SkipAsync(iprot, field.Type, cancellationToken); + await TProtocolUtil.SkipAsync(inputProtocol, field.Type, cancellationToken); break; } - await iprot.ReadFieldEndAsync(cancellationToken); + await inputProtocol.ReadFieldEndAsync(cancellationToken); } - await iprot.ReadStructEndAsync(cancellationToken); + await inputProtocol.ReadStructEndAsync(cancellationToken); return new TApplicationException(type, message); } - public async Task WriteAsync(TProtocol oprot, CancellationToken cancellationToken) + public async Task WriteAsync(TProtocol outputProtocol, CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { @@ -123,27 +124,27 @@ 
namespace Thrift var struc = new TStruct(structApplicationExceptionName); var field = new TField(); - await oprot.WriteStructBeginAsync(struc, cancellationToken); + await outputProtocol.WriteStructBeginAsync(struc, cancellationToken); if (!string.IsNullOrEmpty(Message)) { field.Name = messageTypeFieldName; field.Type = TType.String; field.ID = MessageTypeFieldId; - await oprot.WriteFieldBeginAsync(field, cancellationToken); - await oprot.WriteStringAsync(Message, cancellationToken); - await oprot.WriteFieldEndAsync(cancellationToken); + await outputProtocol.WriteFieldBeginAsync(field, cancellationToken); + await outputProtocol.WriteStringAsync(Message, cancellationToken); + await outputProtocol.WriteFieldEndAsync(cancellationToken); } field.Name = exTypeFieldName; field.Type = TType.I32; field.ID = ExTypeFieldId; - await oprot.WriteFieldBeginAsync(field, cancellationToken); - await oprot.WriteI32Async((int) Type, cancellationToken); - await oprot.WriteFieldEndAsync(cancellationToken); - await oprot.WriteFieldStopAsync(cancellationToken); - await oprot.WriteStructEndAsync(cancellationToken); + await outputProtocol.WriteFieldBeginAsync(field, cancellationToken); + await outputProtocol.WriteI32Async((int) Type, cancellationToken); + await outputProtocol.WriteFieldEndAsync(cancellationToken); + await outputProtocol.WriteFieldStopAsync(cancellationToken); + await outputProtocol.WriteStructEndAsync(cancellationToken); } } } \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TBaseClient.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TBaseClient.cs index ca403e5c9..e01925153 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TBaseClient.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/TBaseClient.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE 
file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -16,14 +16,13 @@ // under the License. using System; -using System.Collections.Generic; -using System.Linq; using System.Threading; using System.Threading.Tasks; using Thrift.Protocols; namespace Thrift { + // ReSharper disable once InconsistentNaming /// /// TBaseClient. /// Base client for generated clients. @@ -39,18 +38,8 @@ namespace Thrift protected TBaseClient(TProtocol inputProtocol, TProtocol outputProtocol) { - if (inputProtocol == null) - { - throw new ArgumentNullException(nameof(inputProtocol)); - } - - if (outputProtocol == null) - { - throw new ArgumentNullException(nameof(outputProtocol)); - } - - _inputProtocol = inputProtocol; - _outputProtocol = outputProtocol; + _inputProtocol = inputProtocol ?? throw new ArgumentNullException(nameof(inputProtocol)); + _outputProtocol = outputProtocol ?? throw new ArgumentNullException(nameof(outputProtocol)); } public TProtocol InputProtocol => _inputProtocol; diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TBufferedClientTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TBufferedClientTransport.cs index 86eb735dc..761f1ac78 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TBufferedClientTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TBufferedClientTransport.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -34,17 +34,12 @@ namespace Thrift.Transports.Client //TODO: should support only specified input transport? 
public TBufferedClientTransport(TClientTransport transport, int bufSize = 1024) { - if (transport == null) - { - throw new ArgumentNullException(nameof(transport)); - } - if (bufSize <= 0) { throw new ArgumentOutOfRangeException(nameof(bufSize), "Buffer size must be a positive number."); } - _transport = transport; + _transport = transport ?? throw new ArgumentNullException(nameof(transport)); _bufSize = bufSize; } @@ -200,8 +195,9 @@ namespace Thrift.Transports.Client { if (disposing) { - _inputBuffer.Dispose(); - _outputBuffer.Dispose(); + _inputBuffer?.Dispose(); + _outputBuffer?.Dispose(); + _transport?.Dispose(); } } _isDisposed = true; diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TFramedClientTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TFramedClientTransport.cs index f54a42a86..d11bb959a 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TFramedClientTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TFramedClientTransport.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -37,12 +37,7 @@ namespace Thrift.Transports.Client public TFramedClientTransport(TClientTransport transport) { - if (transport == null) - { - throw new ArgumentNullException(nameof(transport)); - } - - _transport = transport; + _transport = transport ?? 
throw new ArgumentNullException(nameof(transport)); InitWriteBuffer(); } @@ -195,8 +190,9 @@ namespace Thrift.Transports.Client { if (disposing) { - _readBuffer.Dispose(); - _writeBuffer.Dispose(); + _readBuffer?.Dispose(); + _writeBuffer?.Dispose(); + _transport?.Dispose(); } } _isDisposed = true; diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/THttpClientTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/THttpClientTransport.cs index 1e83f97e9..16754b258 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/THttpClientTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/THttpClientTransport.cs @@ -16,11 +16,9 @@ // under the License. using System; -using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; using System.Linq; -using System.Net; using System.Net.Http; using System.Net.Http.Headers; using System.Security.Cryptography.X509Certificates; @@ -151,7 +149,7 @@ namespace Thrift.Transports.Client } httpClient.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/x-thrift")); - httpClient.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue("THttpClientTransport", "0.11.0")); + httpClient.DefaultRequestHeaders.UserAgent.Add(new ProductInfoHeaderValue("THttpClientTransport", "1.0.0")); if (CustomHeaders != null) { @@ -198,7 +196,7 @@ namespace Thrift.Transports.Client { throw new TTransportException(TTransportException.ExceptionType.Unknown, iox.ToString()); } - catch (WebException wx) + catch (HttpRequestException wx) { throw new TTransportException(TTransportException.ExceptionType.Unknown, "Couldn't connect to server: " + wx); diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TSocketClientTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TSocketClientTransport.cs index a44efe677..e769d1421 
100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TSocketClientTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TSocketClientTransport.cs @@ -30,12 +30,7 @@ namespace Thrift.Transports.Client public TSocketClientTransport(TcpClient client) { - if (client == null) - { - throw new ArgumentNullException(nameof(client)); - } - - TcpClient = client; + TcpClient = client ?? throw new ArgumentNullException(nameof(client)); if (IsOpen) { diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TTlsSocketClientTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TTlsSocketClientTransport.cs index a21977b20..c8be4ede1 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TTlsSocketClientTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Client/TTlsSocketClientTransport.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -57,7 +57,7 @@ namespace Thrift.Transports.Client if (isServer && certificate == null) { throw new ArgumentException("TTlsSocketClientTransport needs certificate to be used for server", - "certificate"); + nameof(certificate)); } if (IsOpen) @@ -204,7 +204,8 @@ namespace Thrift.Transports.Client ? 
new X509CertificateCollection {_certificate} : new X509CertificateCollection(); - await _secureStream.AuthenticateAsClientAsync(_host.ToString(), certs, _sslProtocols, true); + var targetHost = _host.ToString(); + await _secureStream.AuthenticateAsClientAsync(targetHost, certs, _sslProtocols, true); } } catch (Exception) diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/THttpServerTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/THttpServerTransport.cs index 607374135..032063a37 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/THttpServerTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/THttpServerTransport.cs @@ -53,29 +53,14 @@ namespace Thrift.Transports.Server public THttpServerTransport(ITAsyncProcessor processor, ITProtocolFactory inputProtocolFactory, ITProtocolFactory outputProtocolFactory, RequestDelegate next, ILoggerFactory loggerFactory) { - if (processor == null) - { - throw new ArgumentNullException(nameof(processor)); - } - - if (inputProtocolFactory == null) - { - throw new ArgumentNullException(nameof(inputProtocolFactory)); - } - - if (outputProtocolFactory == null) - { - throw new ArgumentNullException(nameof(outputProtocolFactory)); - } - if (loggerFactory == null) { throw new ArgumentNullException(nameof(loggerFactory)); } - Processor = processor; - InputProtocolFactory = inputProtocolFactory; - OutputProtocolFactory = outputProtocolFactory; + Processor = processor ?? throw new ArgumentNullException(nameof(processor)); + InputProtocolFactory = inputProtocolFactory ?? throw new ArgumentNullException(nameof(inputProtocolFactory)); + OutputProtocolFactory = outputProtocolFactory ?? 
throw new ArgumentNullException(nameof(outputProtocolFactory)); _next = next; _logger = loggerFactory.CreateLogger(); diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TNamedPipeServerTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TNamedPipeServerTransport.cs index 01195d4a4..186786ed2 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TNamedPipeServerTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TNamedPipeServerTransport.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TServerSocketTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TServerSocketTransport.cs index af154ef6f..3a9d8a17d 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TServerSocketTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TServerSocketTransport.cs @@ -30,6 +30,7 @@ namespace Thrift.Transports.Server private readonly int _clientTimeout; private readonly int _port; private readonly bool _useBufferedSockets; + private readonly bool _useFramedTransport; private TcpListener _server; public TServerSocketTransport(TcpListener listener) @@ -53,11 +54,17 @@ namespace Thrift.Transports.Server { } - public TServerSocketTransport(int port, int clientTimeout, bool useBufferedSockets) + public TServerSocketTransport(int port, int clientTimeout, bool useBufferedSockets): + this(port, clientTimeout, useBufferedSockets, false) + { + } + + public TServerSocketTransport(int port, int clientTimeout, bool 
useBufferedSockets, bool useFramedTransport) { _port = port; _clientTimeout = clientTimeout; _useBufferedSockets = useBufferedSockets; + _useFramedTransport = useFramedTransport; try { // Make server socket @@ -106,7 +113,7 @@ namespace Thrift.Transports.Server try { - TSocketClientTransport tSocketTransport = null; + TClientTransport tSocketTransport = null; var tcpClient = await _server.AcceptTcpClientAsync(); try @@ -118,7 +125,12 @@ namespace Thrift.Transports.Server if (_useBufferedSockets) { - return new TBufferedClientTransport(tSocketTransport); + tSocketTransport = new TBufferedClientTransport(tSocketTransport); + } + + if (_useFramedTransport) + { + tSocketTransport = new TFramedClientTransport(tSocketTransport); } return tSocketTransport; diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TTlsServerSocketTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TTlsServerSocketTransport.cs index 49abdac86..759feeddd 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TTlsServerSocketTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/Server/TTlsServerSocketTransport.cs @@ -37,6 +37,7 @@ namespace Thrift.Transports.Server private readonly X509Certificate2 _serverCertificate; private readonly SslProtocols _sslProtocols; private readonly bool _useBufferedSockets; + private readonly bool _useFramedTransport; private TcpListener _server; public TTlsServerSocketTransport(int port, X509Certificate2 certificate) @@ -50,6 +51,19 @@ namespace Thrift.Transports.Server X509Certificate2 certificate, RemoteCertificateValidationCallback clientCertValidator = null, LocalCertificateSelectionCallback localCertificateSelectionCallback = null, + SslProtocols sslProtocols = SslProtocols.Tls12) + : this(port, useBufferedSockets, false, certificate, + clientCertValidator, localCertificateSelectionCallback, sslProtocols) + { + } + + public 
TTlsServerSocketTransport( + int port, + bool useBufferedSockets, + bool useFramedTransport, + X509Certificate2 certificate, + RemoteCertificateValidationCallback clientCertValidator = null, + LocalCertificateSelectionCallback localCertificateSelectionCallback = null, SslProtocols sslProtocols = SslProtocols.Tls12) { if (!certificate.HasPrivateKey) @@ -61,6 +75,7 @@ namespace Thrift.Transports.Server _port = port; _serverCertificate = certificate; _useBufferedSockets = useBufferedSockets; + _useFramedTransport = useFramedTransport; _clientCertValidator = clientCertValidator; _localCertificateSelectionCallback = localCertificateSelectionCallback; _sslProtocols = sslProtocols; @@ -122,13 +137,19 @@ namespace Thrift.Transports.Server await tTlsSocket.SetupTlsAsync(); + TClientTransport trans = tTlsSocket; + if (_useBufferedSockets) { - var trans = new TBufferedClientTransport(tTlsSocket); - return trans; + trans = new TBufferedClientTransport(trans); } - return tTlsSocket; + if (_useFramedTransport) + { + trans = new TFramedClientTransport(trans); + } + + return trans; } catch (Exception ex) { diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TClientTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TClientTransport.cs index cee0a0075..0dd96cb36 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TClientTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TClientTransport.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -26,6 +26,7 @@ namespace Thrift.Transports // ReSharper disable once InconsistentNaming public abstract class TClientTransport : IDisposable { + //TODO: think how to avoid peek 
byte private readonly byte[] _peekBuffer = new byte[1]; private bool _hasPeekByte; public abstract bool IsOpen { get; } diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TServerTransport.cs b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TServerTransport.cs index d49feb6a0..0d45a55f9 100644 --- a/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TServerTransport.cs +++ b/vendor/git.apache.org/thrift.git/lib/netcore/Thrift/Transports/TServerTransport.cs @@ -45,7 +45,7 @@ namespace Thrift.Transports if (transport == null) { - throw new TTransportException("AcceptAsync() should not return null"); + throw new TTransportException($"{nameof(AcceptImplementationAsync)} should not return null"); } return transport; diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/build.cmd b/vendor/git.apache.org/thrift.git/lib/netcore/build.cmd new file mode 100644 index 000000000..863c4b45e --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/build.cmd @@ -0,0 +1,27 @@ +@echo off +rem /* +rem * Licensed to the Apache Software Foundation (ASF) under one +rem * or more contributor license agreements. See the NOTICE file +rem * distributed with this work for additional information +rem * regarding copyright ownership. The ASF licenses this file +rem * to you under the Apache License, Version 2.0 (the +rem * "License"); you may not use this file except in compliance +rem * with the License. You may obtain a copy of the License at +rem * +rem * http://www.apache.org/licenses/LICENSE-2.0 +rem * +rem * Unless required by applicable law or agreed to in writing, +rem * software distributed under the License is distributed on an +rem * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +rem * KIND, either express or implied. See the License for the +rem * specific language governing permissions and limitations +rem * under the License. 
+rem */ + +setlocal + +thrift -version +dotnet --info +dotnet build + +:eof diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/build.sh b/vendor/git.apache.org/thrift.git/lib/netcore/build.sh new file mode 100644 index 000000000..ae18bce9b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/build.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +#exit if any command fails +#set -e + +thrift --version +dotnet --info +dotnet build + +#revision=${TRAVIS_JOB_ID:=1} +#revision=$(printf "%04d" $revision) + +#dotnet pack ./src/PROJECT_NAME -c Release -o ./artifacts --version-suffix=$revision diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/runtests.cmd b/vendor/git.apache.org/thrift.git/lib/netcore/runtests.cmd new file mode 100644 index 000000000..5114bc594 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/runtests.cmd @@ -0,0 +1,28 @@ +@echo off +rem /* +rem * Licensed to the Apache Software Foundation (ASF) under one +rem * or more contributor license agreements. See the NOTICE file +rem * distributed with this work for additional information +rem * regarding copyright ownership. 
The ASF licenses this file +rem * to you under the Apache License, Version 2.0 (the +rem * "License"); you may not use this file except in compliance +rem * with the License. You may obtain a copy of the License at +rem * +rem * http://www.apache.org/licenses/LICENSE-2.0 +rem * +rem * Unless required by applicable law or agreed to in writing, +rem * software distributed under the License is distributed on an +rem * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +rem * KIND, either express or implied. See the License for the +rem * specific language governing permissions and limitations +rem * under the License. +rem */ +setlocal + +thrift -version +dotnet --info + +dotnet test Tests\Thrift.IntegrationTests\Thrift.IntegrationTests.csproj +dotnet test Tests\Thrift.Tests\Thrift.Tests.csproj + +:eof diff --git a/vendor/git.apache.org/thrift.git/lib/netcore/runtests.sh b/vendor/git.apache.org/thrift.git/lib/netcore/runtests.sh new file mode 100644 index 000000000..a26cc36ac --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/netcore/runtests.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +thrift -version +dotnet --info + +dotnet test Tests\Thrift.IntegrationTests\Thrift.IntegrationTests.csproj +dotnet test Tests\Thrift.Tests\Thrift.Tests.csproj \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/README.md b/vendor/git.apache.org/thrift.git/lib/nodejs/README.md index 8c25380d1..5b15ce647 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/README.md +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/README.md @@ -22,7 +22,7 @@ under the License. ## Compatibility -node version 4 or later is required +node version 6 or later is required ## Install diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/binary_protocol.js b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/binary_protocol.js index 0c0ee50be..b57c8c576 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/binary_protocol.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/binary_protocol.js @@ -37,6 +37,7 @@ function TBinaryProtocol(trans, strictRead, strictWrite) { this.trans = trans; this.strictRead = (strictRead !== undefined ? strictRead : false); this.strictWrite = (strictWrite !== undefined ? 
strictWrite : true); + this._seqid = null; }; TBinaryProtocol.prototype.flush = function() { @@ -54,7 +55,7 @@ TBinaryProtocol.prototype.writeMessageBegin = function(name, type, seqid) { this.writeI32(seqid); } // Record client seqid to find callback again - if (this._seqid) { + if (this._seqid !== null) { log.warning('SeqId already set', { 'name': name }); } else { this._seqid = seqid; @@ -63,7 +64,7 @@ TBinaryProtocol.prototype.writeMessageBegin = function(name, type, seqid) { }; TBinaryProtocol.prototype.writeMessageEnd = function() { - if (this._seqid) { + if (this._seqid !== null) { this._seqid = null; } else { log.warning('No seqid to unset'); diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/connection.js b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/connection.js index 273339b1f..b54545436 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/connection.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/connection.js @@ -232,7 +232,11 @@ Connection.prototype.connection_gone = function () { return; } - self.connection.connect(self.port, self.host); + if (self.path !== undefined) { + self.connection.connect(self.path); + } else { + self.connection.connect(self.port, self.host); + } self.retry_timer = null; }, this.retry_delay); }; @@ -246,6 +250,14 @@ exports.createConnection = function(host, port, options) { return connection; }; +exports.createUDSConnection = function(path, options) { + var stream = net.createConnection(path); + var connection = new Connection(stream, options); + connection.path = path; + + return connection; +}; + exports.createSSLConnection = function(host, port, options) { if (!('secureProtocol' in options) && !('secureOptions' in options)) { options.secureProtocol = "SSLv23_method"; diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/http_connection.js b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/http_connection.js index 508553801..4f5378f43 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/http_connection.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/http_connection.js @@ -59,8 +59,6 @@ var createClient = require('./create_client'); * Initializes a Thrift HttpConnection instance (use createHttpConnection() rather than * instantiating directly). * @constructor - * @param {string} host - The host name or IP to connect to. - * @param {number} port - The TCP port to connect to. * @param {ConnectOptions} options - The configuration options to use. * @throws {error} Exceptions other than InputBufferUnderrunError are rethrown * @event {error} The "error" event is fired when a Node.js error event occurs during @@ -71,15 +69,16 @@ var createClient = require('./create_client'); * semantics implemented over the Node.js http.request() method. * @see {@link createHttpConnection} */ -var HttpConnection = exports.HttpConnection = function(host, port, options) { +var HttpConnection = exports.HttpConnection = function(options) { //Initialize the emitter base object EventEmitter.call(this); //Set configuration var self = this; this.options = options || {}; - this.host = host; - this.port = port; + this.host = this.options.host; + this.port = this.options.port; + this.socketPath = this.options.socketPath; this.https = this.options.https || false; this.transport = this.options.transport || TBufferedTransport; this.protocol = this.options.protocol || TBinaryProtocol; @@ -87,7 +86,8 @@ var HttpConnection = exports.HttpConnection = function(host, port, options) { //Prepare Node.js options this.nodeOptions = { host: this.host, - port: this.port || 80, + port: this.port, + socketPath: this.socketPath, path: this.options.path || '/', method: 'POST', headers: this.options.headers || {}, @@ -238,7 +238,14 @@ HttpConnection.prototype.write = function(data) { * @see {@link ConnectOptions} */ exports.createHttpConnection = function(host, port, options) { - return new HttpConnection(host, port, options); + 
options.host = host; + options.port = port || 80; + return new HttpConnection(options); +}; + +exports.createHttpUDSConnection = function(path, options) { + options.socketPath = path; + return new HttpConnection(options); }; exports.createHttpClient = createClient @@ -253,4 +260,4 @@ function THTTPException(statusCode, response) { this.type = thrift.TApplicationExceptionType.PROTOCOL_ERROR; this.message = "Received a response with a bad HTTP status code: " + response.statusCode; } -util.inherits(THTTPException, thrift.TApplicationException); \ No newline at end of file +util.inherits(THTTPException, thrift.TApplicationException); diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/index.js b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/index.js index 020726d57..b09953d71 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/index.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/index.js @@ -27,6 +27,7 @@ var connection = require('./connection'); exports.Connection = connection.Connection; exports.createClient = connection.createClient; exports.createConnection = connection.createConnection; +exports.createUDSConnection = connection.createUDSConnection; exports.createSSLConnection = connection.createSSLConnection; exports.createStdIOClient = connection.createStdIOClient; exports.createStdIOConnection = connection.createStdIOConnection; @@ -34,6 +35,7 @@ exports.createStdIOConnection = connection.createStdIOConnection; var httpConnection = require('./http_connection'); exports.HttpConnection = httpConnection.HttpConnection; exports.createHttpConnection = httpConnection.createHttpConnection; +exports.createHttpUDSConnection = httpConnection.createHttpUDSConnection; exports.createHttpClient = httpConnection.createHttpClient; var wsConnection = require('./ws_connection'); diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/json_protocol.js 
b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/json_protocol.js index 84c62f2c1..d960be9d0 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/json_protocol.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/lib/thrift/json_protocol.js @@ -738,5 +738,65 @@ TJSONProtocol.prototype.getTransport = function() { * Method to arbitrarily skip over data */ TJSONProtocol.prototype.skip = function(type) { - throw new Error('skip not supported yet'); + switch (type) { + case Type.STOP: + return; + case Type.BOOL: + this.readBool(); + break; + case Type.BYTE: + this.readByte(); + break; + case Type.I16: + this.readI16(); + break; + case Type.I32: + this.readI32(); + break; + case Type.I64: + this.readI64(); + break; + case Type.DOUBLE: + this.readDouble(); + break; + case Type.STRING: + this.readString(); + break; + case Type.STRUCT: + this.readStructBegin(); + while (true) { + var r = this.readFieldBegin(); + if (r.ftype === Type.STOP) { + break; + } + this.skip(r.ftype); + this.readFieldEnd(); + } + this.readStructEnd(); + break; + case Type.MAP: + var mapBegin = this.readMapBegin(); + for (var i = 0; i < mapBegin.size; ++i) { + this.skip(mapBegin.ktype); + this.skip(mapBegin.vtype); + } + this.readMapEnd(); + break; + case Type.SET: + var setBegin = this.readSetBegin(); + for (var i2 = 0; i2 < setBegin.size; ++i2) { + this.skip(setBegin.etype); + } + this.readSetEnd(); + break; + case Type.LIST: + var listBegin = this.readListBegin(); + for (var i3 = 0; i3 < listBegin.size; ++i3) { + this.skip(listBegin.etype); + } + this.readListEnd(); + break; + default: + throw new Error("Invalid type: " + type); + } }; diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/test/browser_client.js b/vendor/git.apache.org/thrift.git/lib/nodejs/test/browser_client.js deleted file mode 100644 index 72fd8375b..000000000 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/test/browser_client.js +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -var assert = require('assert'); -var thrift = require('thrift'); -var helpers = require('./helpers'); -var ThriftTest = require('./gen-nodejs/ThriftTest'); -var ThriftTestDriver = require('./test_driver').ThriftTestDriver; - -// createXHRConnection createWSConnection -var connection = thrift.createXHRConnection("localhost", 9090, { - transport: helpers.transports['buffered'], - protocol: helpers.protocols['json'], - path: '/test' -}); - -connection.on('error', function(err) { - assert(false, err); -}); - -// Uncomment the following line to start a websockets connection -// connection.open(); - -// createWSClient createXHRClient -var client = thrift.createXHRClient(ThriftTest, connection); - -ThriftTestDriver(client, function (status) { - console.log('Browser:', status); -}); diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/test/client.js b/vendor/git.apache.org/thrift.git/lib/nodejs/test/client.js index 006fad2dc..55839f616 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/test/client.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/test/client.js @@ -36,6 +36,7 @@ program .option('-t, --transport ', 'Set thrift transport (buffered|framed|http) [transport]') .option('--port ', 
'Set thrift server port number to connect', 9090) .option('--host ', 'Set thrift server host to connect', 'localhost') + .option('--domain-socket ', 'Set thrift server unix domain socket to connect') .option('--ssl', 'use SSL transport') .option('--promise', 'test with promise style functions') .option('-t, --type ', 'Select server type (http|multiplex|tcp|websocket)', 'tcp') @@ -43,6 +44,7 @@ program var host = program.host; var port = program.port; +var domainSocket = program.domainSocket; var type = program.type; var ssl = program.ssl; var promise = program.promise; @@ -83,11 +85,19 @@ var client; var testDriver = promise ? ThriftTestDriverPromise : ThriftTestDriver; if (type === 'tcp' || type === 'multiplex') { - connection = ssl ? - thrift.createSSLConnection(host, port, options) : - thrift.createConnection(host, port, options); + if (domainSocket) { + connection = thrift.createUDSConnection(domainSocket, options); + } else { + connection = ssl ? + thrift.createSSLConnection(host, port, options) : + thrift.createConnection(host, port, options); + } } else if (type === 'http') { - connection = thrift.createHttpConnection(host, port, options); + if (domainSocket) { + connection = thrift.createHttpUDSConnection(domainSocket, options); + } else { + connection = thrift.createHttpConnection(host, port, options); + } } else if (type === 'websocket') { connection = thrift.createWSConnection(host, port, options); connection.open(); diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/test/exceptions.js b/vendor/git.apache.org/thrift.git/lib/nodejs/test/exceptions.js index 0a7577062..afb3f67b7 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/test/exceptions.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/test/exceptions.js @@ -36,6 +36,20 @@ test('TApplicationException', function t(assert) { assert.end(); }); +test('unexpected TApplicationException ', function t(assert) { + var e = new thrift.TApplicationException(1, 100); + assert.ok(e instanceof 
thrift.TApplicationException, 'is instanceof TApplicationException'); + assert.ok(e instanceof thrift.TException, 'is instanceof TException'); + assert.ok(e instanceof Error, 'is instanceof Error'); + assert.equal(typeof e.stack, 'string', 'has stack trace'); + assert.ok(/^TApplicationException: 100/.test(e.stack), 'Stack trace has correct error name and message'); + assert.ok(e.stack.indexOf('test/exceptions.js:7:11') !== -1, 'stack trace starts on correct line and column'); + assert.equal(e.name, 'TApplicationException', 'has function name TApplicationException'); + assert.equal(e.message, 100, 'has error message 100'); + assert.equal(e.type, 1, 'has type 1'); + assert.end(); +}); + test('TException', function t(assert) { var e = new thrift.TException('foo'); assert.ok(e instanceof thrift.TException, 'is instanceof TException'); diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/test/server.js b/vendor/git.apache.org/thrift.git/lib/nodejs/test/server.js index 8f2e06b8d..030d28b38 100644 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/test/server.js +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/test/server.js @@ -36,11 +36,13 @@ program .option('-t, --transport ', 'Set thrift transport (buffered|framed|http)', 'buffered') .option('--ssl', 'use ssl transport') .option('--port ', 'Set thrift server port', 9090) + .option('--domain-socket ', 'Set thift server unix domain socket') .option('--promise', 'test with promise style functions') .option('-t, --type ', 'Select server type (http|multiplex|tcp|websocket)', 'tcp') .parse(process.argv); var port = program.port; +var domainSocket = program.domainSocket; var type = program.type; var ssl = program.ssl; var promise = program.promise; @@ -88,10 +90,12 @@ if (type === 'multiplex') { } if (ssl) { - options.tls = { - key: fs.readFileSync(path.resolve(__dirname, 'server.key')), - cert: fs.readFileSync(path.resolve(__dirname, 'server.crt')) - }; + if (type === 'tcp' || type === 'multiplex' || type === 'http' || 
type === 'websocket') { + options.tls = { + key: fs.readFileSync(path.resolve(__dirname, 'server.key')), + cert: fs.readFileSync(path.resolve(__dirname, 'server.crt')) + }; + } } var server; @@ -103,4 +107,8 @@ if (type === 'tcp') { server = thrift.createWebServer(options); } -server.listen(port); +if (domainSocket) { + server.listen(domainSocket); +} else if (type === 'tcp' || type === 'multiplex' || type === 'http' || type === 'websocket') { + server.listen(port); +} diff --git a/vendor/git.apache.org/thrift.git/lib/nodejs/test/testAll.sh b/vendor/git.apache.org/thrift.git/lib/nodejs/test/testAll.sh index aae451a48..ac22b64e8 100755 --- a/vendor/git.apache.org/thrift.git/lib/nodejs/test/testAll.sh +++ b/vendor/git.apache.org/thrift.git/lib/nodejs/test/testAll.sh @@ -24,7 +24,6 @@ fi DIR="$( cd "$( dirname "$0" )" && pwd )" ISTANBUL="$DIR/../../../node_modules/istanbul/lib/cli.js" -RUNBROWSER="$DIR/../../../node_modules/run-browser/bin/cli.js" REPORT_PREFIX="${DIR}/../coverage/report" @@ -55,17 +54,6 @@ testServer() return $RET } -testBrowser() -{ - echo " Testing browser client with http server with json protocol and buffered transport"; - RET=0 - node ${DIR}/server.js --type http -p json -t buffered & - SERVERPID=$! - sleep 1 - ${RUNBROWSER} ${DIR}/browser_client.js --phantom || RET=1 - kill -2 $SERVERPID || RET=1 - return $RET -} TESTOK=0 @@ -96,8 +84,6 @@ do done done -# XHR only until phantomjs 2 is released. 
-# testBrowser if [ -n "${COVER}" ]; then ${ISTANBUL} report --dir "${DIR}/../coverage" --include "${DIR}/../coverage/report*/coverage.json" lcov cobertura html diff --git a/vendor/git.apache.org/thrift.git/lib/ocaml/_oasis b/vendor/git.apache.org/thrift.git/lib/ocaml/_oasis index 19033e619..4dd95e5ce 100644 --- a/vendor/git.apache.org/thrift.git/lib/ocaml/_oasis +++ b/vendor/git.apache.org/thrift.git/lib/ocaml/_oasis @@ -1,5 +1,5 @@ Name: libthrift-ocaml -Version: 0.11.0 +Version: 1.0 OASISFormat: 0.3 Synopsis: OCaml bindings for the Apache Thrift RPC system Authors: Apache Thrift Developers diff --git a/vendor/git.apache.org/thrift.git/lib/perl/MANIFEST.SKIP b/vendor/git.apache.org/thrift.git/lib/perl/MANIFEST.SKIP new file mode 100644 index 000000000..7963b42ad --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/perl/MANIFEST.SKIP @@ -0,0 +1,13 @@ +blib/.*$ +build-cpan-dist.sh +FixupDist.pl +MANIFEST.bak +MANIFEST.SKIP +MYMETA.json +Makefile +Makefile.am +Makefile.in +pm_to_blib +test/Makefile.am +test/Makefile.in +tools/FixupDist.pl diff --git a/vendor/git.apache.org/thrift.git/lib/perl/Makefile.PL b/vendor/git.apache.org/thrift.git/lib/perl/Makefile.PL index ee7a43612..bdeaad2b7 100644 --- a/vendor/git.apache.org/thrift.git/lib/perl/Makefile.PL +++ b/vendor/git.apache.org/thrift.git/lib/perl/Makefile.PL @@ -17,7 +17,12 @@ # under the License. 
# +use 5.10.0; +use strict; +use warnings; + use ExtUtils::MakeMaker; + WriteMakefile( ABSTRACT => 'Apache Thrift is a software framework for scalable cross-language services development.', AUTHOR => 'Apache Thrift ', LICENSE => 'apache_2_0', diff --git a/vendor/git.apache.org/thrift.git/lib/perl/build-cpan-dist.sh b/vendor/git.apache.org/thrift.git/lib/perl/build-cpan-dist.sh index 1765e6d08..ae22e7e62 100755 --- a/vendor/git.apache.org/thrift.git/lib/perl/build-cpan-dist.sh +++ b/vendor/git.apache.org/thrift.git/lib/perl/build-cpan-dist.sh @@ -1,9 +1,53 @@ #!/bin/bash # # This script is intended to be used after tagging the repository and updating -# the version files for a release. It will create a CPAN archive. +# the version files for a release. It will create a CPAN archive. Run this +# from inside a docker image like ubuntu-xenial. +# + +set -e + +rm -f MANIFEST +rm -rf Thrift-* + +# setup cpan without a prompt +echo | cpan +cpan install HTTP::Date +cpan install CPAN +cpan install CPAN::Meta ExtUtils::MakeMaker JSON::PP perl Makefile.PL -make +rm MYMETA.yml make manifest make dist + +# +# We unpack the archive so we can add version metadata for CPAN +# so that it properly indexes Thrift and remove unnecessary files. +# + +echo '-----------------------------------------------------------' +set -x + +DISTFILE=$(ls Thrift*.gz) +NEWFILE=${DISTFILE/t-v/t-} +if [[ "$DISTFILE" != "$NEWFILE" ]]; then + mv $DISTFILE $NEWFILE + DISTFILE="$NEWFILE" +fi +tar xzf $DISTFILE +rm $DISTFILE +DISTDIR=$(ls -d Thrift*) +# cpan doesn't like "Thrift-v0.nn.0 as a directory name +# needs to be Thrift-0.nn.0 +NEWDIR=${DISTDIR/t-v/t-} +if [[ "$DISTDIR" != "$NEWDIR" ]]; then + mv $DISTDIR $NEWDIR + DISTDIR="$NEWDIR" +fi +cd $DISTDIR +cp -p ../Makefile.PL . +perl ../tools/FixupDist.pl +cd .. 
+tar cvzf $DISTFILE $DISTDIR +rm -r $DISTDIR diff --git a/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift.pm b/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift.pm index cb6235320..592d1ddcd 100644 --- a/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift.pm +++ b/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift.pm @@ -31,6 +31,6 @@ use warnings; # package Thrift; -use version 0.77; our $VERSION = version->declare("v0.11.0"); +use version 0.77; our $VERSION = version->declare("v1.0_0"); 1; diff --git a/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/Server.pm b/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/Server.pm index fc9ca30a9..f265d45f9 100644 --- a/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/Server.pm +++ b/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/Server.pm @@ -150,27 +150,31 @@ sub new sub serve { my $self = shift; - + my $stop = 0; + $self->{serverTransport}->listen(); - while (1) - { + while (!$stop) { my $client = $self->{serverTransport}->accept(); - my $itrans = $self->{inputTransportFactory}->getTransport($client); - my $otrans = $self->{outputTransportFactory}->getTransport($client); - my $iprot = $self->{inputProtocolFactory}->getProtocol($itrans); - my $oprot = $self->{outputProtocolFactory}->getProtocol($otrans); - eval { - $self->_clientBegin($iprot, $oprot); - while (1) - { - $self->{processor}->process($iprot, $oprot); + if (defined $client) { + my $itrans = $self->{inputTransportFactory}->getTransport($client); + my $otrans = $self->{outputTransportFactory}->getTransport($client); + my $iprot = $self->{inputProtocolFactory}->getProtocol($itrans); + my $oprot = $self->{outputProtocolFactory}->getProtocol($otrans); + eval { + $self->_clientBegin($iprot, $oprot); + while (1) + { + $self->{processor}->process($iprot, $oprot); + } + }; if($@) { + $self->_handleException($@); } - }; if($@) { - $self->_handleException($@); - } - $itrans->close(); - $otrans->close(); + $itrans->close(); + $otrans->close(); 
+ } else { + $stop = 1; + } } } diff --git a/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/ServerSocket.pm b/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/ServerSocket.pm index 51f83b42f..2c4d906c4 100644 --- a/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/ServerSocket.pm +++ b/vendor/git.apache.org/thrift.git/lib/perl/lib/Thrift/ServerSocket.pm @@ -81,15 +81,24 @@ sub accept { my $self = shift; - if ( exists $self->{handle} and defined $self->{handle} ) - { + if ( exists $self->{handle} and defined $self->{handle} ) { my $client = $self->{handle}->accept(); my $result = $self->__client(); $result->{handle} = new IO::Select($client); return $result; } - return 0; + return undef; +} + +sub close +{ + my $self = shift; + + if ( exists $self->{handle} and defined $self->{handle} ) + { + $self->{handle}->close(); + } } ### diff --git a/vendor/git.apache.org/thrift.git/lib/perl/tools/FixupDist.pl b/vendor/git.apache.org/thrift.git/lib/perl/tools/FixupDist.pl new file mode 100644 index 000000000..24a2b200a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/perl/tools/FixupDist.pl @@ -0,0 +1,35 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# +# This will fix up the distribution so that CPAN properly +# indexes Thrift. +# + +use 5.10.0; +use strict; +use warnings; +use utf8; + +use Data::Dumper; +use CPAN::Meta; + +my $meta = CPAN::Meta->load_file('META.json'); +$meta->{'provides'} = { 'Thrift' => { 'file' => 'lib/Thrift.pm', 'version' => $meta->version() } }; +$meta->save('META.json'); diff --git a/vendor/git.apache.org/thrift.git/lib/php/Makefile.am b/vendor/git.apache.org/thrift.git/lib/php/Makefile.am index 5aa3be49b..8d9050a1d 100755 --- a/vendor/git.apache.org/thrift.git/lib/php/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/php/Makefile.am @@ -37,96 +37,96 @@ distclean-local: endif -phpdir = $(PHP_PREFIX)/Thrift +phpdir = $(PHP_PREFIX)/ php_DATA = \ - lib/Thrift/TMultiplexedProcessor.php + lib/TMultiplexedProcessor.php phpbasedir = $(phpdir)/Base phpbase_DATA = \ - lib/Thrift/Base/TBase.php + lib/Base/TBase.php phpclassloaderdir = $(phpdir)/ClassLoader phpclassloader_DATA = \ - lib/Thrift/ClassLoader/ThriftClassLoader.php + lib/ClassLoader/ThriftClassLoader.php phpexceptiondir = $(phpdir)/Exception phpexception_DATA = \ - lib/Thrift/Exception/TApplicationException.php \ - lib/Thrift/Exception/TException.php \ - lib/Thrift/Exception/TProtocolException.php \ - lib/Thrift/Exception/TTransportException.php + lib/Exception/TApplicationException.php \ + lib/Exception/TException.php \ + lib/Exception/TProtocolException.php \ + lib/Exception/TTransportException.php phpfactorydir = $(phpdir)/Factory phpfactory_DATA = \ - lib/Thrift/Factory/TBinaryProtocolFactory.php \ - lib/Thrift/Factory/TCompactProtocolFactory.php \ - lib/Thrift/Factory/TJSONProtocolFactory.php \ - lib/Thrift/Factory/TProtocolFactory.php \ - lib/Thrift/Factory/TStringFuncFactory.php \ - lib/Thrift/Factory/TTransportFactory.php + lib/Factory/TBinaryProtocolFactory.php \ + lib/Factory/TCompactProtocolFactory.php \ + lib/Factory/TJSONProtocolFactory.php \ + lib/Factory/TProtocolFactory.php \ + 
lib/Factory/TStringFuncFactory.php \ + lib/Factory/TTransportFactory.php phpprotocoldir = $(phpdir)/Protocol phpprotocol_DATA = \ - lib/Thrift/Protocol/TBinaryProtocolAccelerated.php \ - lib/Thrift/Protocol/TBinaryProtocol.php \ - lib/Thrift/Protocol/TCompactProtocol.php \ - lib/Thrift/Protocol/TJSONProtocol.php \ - lib/Thrift/Protocol/TMultiplexedProtocol.php \ - lib/Thrift/Protocol/TProtocol.php \ - lib/Thrift/Protocol/TProtocolDecorator.php \ - lib/Thrift/Protocol/TSimpleJSONProtocol.php + lib/Protocol/TBinaryProtocolAccelerated.php \ + lib/Protocol/TBinaryProtocol.php \ + lib/Protocol/TCompactProtocol.php \ + lib/Protocol/TJSONProtocol.php \ + lib/Protocol/TMultiplexedProtocol.php \ + lib/Protocol/TProtocol.php \ + lib/Protocol/TProtocolDecorator.php \ + lib/Protocol/TSimpleJSONProtocol.php phpprotocoljsondir = $(phpprotocoldir)/JSON phpprotocoljson_DATA = \ - lib/Thrift/Protocol/JSON/BaseContext.php \ - lib/Thrift/Protocol/JSON/ListContext.php \ - lib/Thrift/Protocol/JSON/LookaheadReader.php \ - lib/Thrift/Protocol/JSON/PairContext.php + lib/Protocol/JSON/BaseContext.php \ + lib/Protocol/JSON/ListContext.php \ + lib/Protocol/JSON/LookaheadReader.php \ + lib/Protocol/JSON/PairContext.php phpprotocolsimplejsondir = $(phpprotocoldir)/SimpleJSON phpprotocolsimplejson_DATA = \ - lib/Thrift/Protocol/SimpleJSON/CollectionMapKeyException.php \ - lib/Thrift/Protocol/SimpleJSON/Context.php \ - lib/Thrift/Protocol/SimpleJSON/ListContext.php \ - lib/Thrift/Protocol/SimpleJSON/MapContext.php \ - lib/Thrift/Protocol/SimpleJSON/StructContext.php + lib/Protocol/SimpleJSON/CollectionMapKeyException.php \ + lib/Protocol/SimpleJSON/Context.php \ + lib/Protocol/SimpleJSON/ListContext.php \ + lib/Protocol/SimpleJSON/MapContext.php \ + lib/Protocol/SimpleJSON/StructContext.php phpserializerdir = $(phpdir)/Serializer phpserializer_DATA = \ - lib/Thrift/Serializer/TBinarySerializer.php + lib/Serializer/TBinarySerializer.php phpserverdir = $(phpdir)/Server phpserver_DATA = \ - 
lib/Thrift/Server/TServerSocket.php \ - lib/Thrift/Server/TForkingServer.php \ - lib/Thrift/Server/TServer.php \ - lib/Thrift/Server/TServerTransport.php \ - lib/Thrift/Server/TSimpleServer.php + lib/Server/TServerSocket.php \ + lib/Server/TForkingServer.php \ + lib/Server/TServer.php \ + lib/Server/TServerTransport.php \ + lib/Server/TSimpleServer.php phpstringfuncdir = $(phpdir)/StringFunc phpstringfunc_DATA = \ - lib/Thrift/StringFunc/Mbstring.php \ - lib/Thrift/StringFunc/Core.php \ - lib/Thrift/StringFunc/TStringFunc.php + lib/StringFunc/Mbstring.php \ + lib/StringFunc/Core.php \ + lib/StringFunc/TStringFunc.php phptransportdir = $(phpdir)/Transport phptransport_DATA = \ - lib/Thrift/Transport/TBufferedTransport.php \ - lib/Thrift/Transport/TCurlClient.php \ - lib/Thrift/Transport/TFramedTransport.php \ - lib/Thrift/Transport/THttpClient.php \ - lib/Thrift/Transport/TMemoryBuffer.php \ - lib/Thrift/Transport/TNullTransport.php \ - lib/Thrift/Transport/TPhpStream.php \ - lib/Thrift/Transport/TSocket.php \ - lib/Thrift/Transport/TSocketPool.php \ - lib/Thrift/Transport/TTransport.php + lib/Transport/TBufferedTransport.php \ + lib/Transport/TCurlClient.php \ + lib/Transport/TFramedTransport.php \ + lib/Transport/THttpClient.php \ + lib/Transport/TMemoryBuffer.php \ + lib/Transport/TNullTransport.php \ + lib/Transport/TPhpStream.php \ + lib/Transport/TSocket.php \ + lib/Transport/TSocketPool.php \ + lib/Transport/TTransport.php phptypedir = $(phpdir)/Type phptype_DATA = \ - lib/Thrift/Type/TMessageType.php \ - lib/Thrift/Type/TType.php \ - lib/Thrift/Type/TConstant.php + lib/Type/TMessageType.php \ + lib/Type/TType.php \ + lib/Type/TConstant.php EXTRA_DIST = \ lib \ diff --git a/vendor/git.apache.org/thrift.git/lib/php/README.md b/vendor/git.apache.org/thrift.git/lib/php/README.md index c24ee2c0c..7170104df 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/README.md +++ b/vendor/git.apache.org/thrift.git/lib/php/README.md @@ -1,7 +1,6 @@ Thrift PHP Software 
Library -License -======= +# License Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file @@ -20,8 +19,7 @@ KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -Using Thrift with PHP -===================== +# Using Thrift with PHP Thrift requires PHP 5. Thrift makes as few assumptions about your PHP environment as possible while trying to make some more advanced PHP @@ -29,25 +27,34 @@ features (i.e. APC cacheing using asbolute path URLs) as simple as possible. To use Thrift in your PHP codebase, take the following steps: -#1) Copy all of thrift/lib/php/lib into your PHP codebase -#2) Configure Symfony Autoloader (or whatever you usually use) +1. Copy all of thrift/lib/php/lib into your PHP codebase +2. Configure Symfony Autoloader (or whatever you usually use) After that, you have to manually include the Thrift package created by the compiler: +``` require_once 'packages/Service/Service.php'; require_once 'packages/Service/Types.php'; +``` -Dependencies -============ +# Dependencies PHP_INT_SIZE - This built-in signals whether your architecture is 32 or 64 bit and is - used by the TBinaryProtocol to properly use pack() and unpack() to - serialize data. + This built-in signals whether your architecture is 32 or 64 bit and is + used by the TBinaryProtocol to properly use pack() and unpack() to + serialize data. apc_fetch(), apc_store() - APC cache is used by the TSocketPool class. If you do not have APC installed, - Thrift will fill in null stub function definitions. + APC cache is used by the TSocketPool class. If you do not have APC installed, + Thrift will fill in null stub function definitions. + +# Breaking Changes + +## 0.12.0 + +1. [PSR-4](https://www.php-fig.org/psr/psr-4/) loader is now the default. If you want to use class maps instead, use `-gen php:classmap`. + +2. 
If using PSR-4, use `$thriftClassLoader->registerNamespace('namespace', '')` instead of `$thriftClassLoader->registerDefinition('namespace', '')`. diff --git a/vendor/git.apache.org/thrift.git/lib/php/coding_standards.md b/vendor/git.apache.org/thrift.git/lib/php/coding_standards.md index fa0390bb5..e217539cd 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/coding_standards.md +++ b/vendor/git.apache.org/thrift.git/lib/php/coding_standards.md @@ -1 +1,5 @@ -Please follow [General Coding Standards](/doc/coding_standards.md) +## PHP Coding Standards + +Please follow: + * [Thrift General Coding Standards](/doc/coding_standards.md) + * [PSR-2](http://www.php-fig.org/psr/psr-2/) diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Base/TBase.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Base/TBase.php new file mode 100644 index 000000000..f2c514a60 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Base/TBase.php @@ -0,0 +1,382 @@ + 'Bool', + TType::BYTE => 'Byte', + TType::I16 => 'I16', + TType::I32 => 'I32', + TType::I64 => 'I64', + TType::DOUBLE => 'Double', + TType::STRING => 'String' + ); + + abstract public function read($input); + + abstract public function write($output); + + public function __construct($spec = null, $vals = null) + { + if (is_array($spec) && is_array($vals)) { + foreach ($spec as $fid => $fspec) { + $var = $fspec['var']; + if (isset($vals[$var])) { + $this->$var = $vals[$var]; + } + } + } + } + + public function __wakeup() + { + $this->__construct(get_object_vars($this)); + } + + private function _readMap(&$var, $spec, $input) + { + $xfer = 0; + $ktype = $spec['ktype']; + $vtype = $spec['vtype']; + $kread = $vread = null; + if (isset(TBase::$tmethod[$ktype])) { + $kread = 'read' . TBase::$tmethod[$ktype]; + } else { + $kspec = $spec['key']; + } + if (isset(TBase::$tmethod[$vtype])) { + $vread = 'read' . 
TBase::$tmethod[$vtype]; + } else { + $vspec = $spec['val']; + } + $var = array(); + $_ktype = $_vtype = $size = 0; + $xfer += $input->readMapBegin($_ktype, $_vtype, $size); + for ($i = 0; $i < $size; ++$i) { + $key = $val = null; + if ($kread !== null) { + $xfer += $input->$kread($key); + } else { + switch ($ktype) { + case TType::STRUCT: + $class = $kspec['class']; + $key = new $class(); + $xfer += $key->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($key, $kspec, $input); + break; + case TType::LST: + $xfer += $this->_readList($key, $kspec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($key, $kspec, $input, true); + break; + } + } + if ($vread !== null) { + $xfer += $input->$vread($val); + } else { + switch ($vtype) { + case TType::STRUCT: + $class = $vspec['class']; + $val = new $class(); + $xfer += $val->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($val, $vspec, $input); + break; + case TType::LST: + $xfer += $this->_readList($val, $vspec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($val, $vspec, $input, true); + break; + } + } + $var[$key] = $val; + } + $xfer += $input->readMapEnd(); + + return $xfer; + } + + private function _readList(&$var, $spec, $input, $set = false) + { + $xfer = 0; + $etype = $spec['etype']; + $eread = $vread = null; + if (isset(TBase::$tmethod[$etype])) { + $eread = 'read' . 
TBase::$tmethod[$etype]; + } else { + $espec = $spec['elem']; + } + $var = array(); + $_etype = $size = 0; + if ($set) { + $xfer += $input->readSetBegin($_etype, $size); + } else { + $xfer += $input->readListBegin($_etype, $size); + } + for ($i = 0; $i < $size; ++$i) { + $elem = null; + if ($eread !== null) { + $xfer += $input->$eread($elem); + } else { + $espec = $spec['elem']; + switch ($etype) { + case TType::STRUCT: + $class = $espec['class']; + $elem = new $class(); + $xfer += $elem->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($elem, $espec, $input); + break; + case TType::LST: + $xfer += $this->_readList($elem, $espec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($elem, $espec, $input, true); + break; + } + } + if ($set) { + $var[$elem] = true; + } else { + $var [] = $elem; + } + } + if ($set) { + $xfer += $input->readSetEnd(); + } else { + $xfer += $input->readListEnd(); + } + + return $xfer; + } + + protected function _read($class, $spec, $input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + if (isset($spec[$fid])) { + $fspec = $spec[$fid]; + $var = $fspec['var']; + if ($ftype == $fspec['type']) { + $xfer = 0; + if (isset(TBase::$tmethod[$ftype])) { + $func = 'read' . 
TBase::$tmethod[$ftype]; + $xfer += $input->$func($this->$var); + } else { + switch ($ftype) { + case TType::STRUCT: + $class = $fspec['class']; + $this->$var = new $class(); + $xfer += $this->$var->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($this->$var, $fspec, $input); + break; + case TType::LST: + $xfer += $this->_readList($this->$var, $fspec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($this->$var, $fspec, $input, true); + break; + } + } + } else { + $xfer += $input->skip($ftype); + } + } else { + $xfer += $input->skip($ftype); + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + + return $xfer; + } + + private function _writeMap($var, $spec, $output) + { + $xfer = 0; + $ktype = $spec['ktype']; + $vtype = $spec['vtype']; + $kwrite = $vwrite = null; + if (isset(TBase::$tmethod[$ktype])) { + $kwrite = 'write' . TBase::$tmethod[$ktype]; + } else { + $kspec = $spec['key']; + } + if (isset(TBase::$tmethod[$vtype])) { + $vwrite = 'write' . 
TBase::$tmethod[$vtype]; + } else { + $vspec = $spec['val']; + } + $xfer += $output->writeMapBegin($ktype, $vtype, count($var)); + foreach ($var as $key => $val) { + if (isset($kwrite)) { + $xfer += $output->$kwrite($key); + } else { + switch ($ktype) { + case TType::STRUCT: + $xfer += $key->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($key, $kspec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($key, $kspec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($key, $kspec, $output, true); + break; + } + } + if (isset($vwrite)) { + $xfer += $output->$vwrite($val); + } else { + switch ($vtype) { + case TType::STRUCT: + $xfer += $val->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($val, $vspec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($val, $vspec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($val, $vspec, $output, true); + break; + } + } + } + $xfer += $output->writeMapEnd(); + + return $xfer; + } + + private function _writeList($var, $spec, $output, $set = false) + { + $xfer = 0; + $etype = $spec['etype']; + $ewrite = null; + if (isset(TBase::$tmethod[$etype])) { + $ewrite = 'write' . TBase::$tmethod[$etype]; + } else { + $espec = $spec['elem']; + } + if ($set) { + $xfer += $output->writeSetBegin($etype, count($var)); + } else { + $xfer += $output->writeListBegin($etype, count($var)); + } + foreach ($var as $key => $val) { + $elem = $set ? 
$key : $val; + if (isset($ewrite)) { + $xfer += $output->$ewrite($elem); + } else { + switch ($etype) { + case TType::STRUCT: + $xfer += $elem->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($elem, $espec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($elem, $espec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($elem, $espec, $output, true); + break; + } + } + } + if ($set) { + $xfer += $output->writeSetEnd(); + } else { + $xfer += $output->writeListEnd(); + } + + return $xfer; + } + + protected function _write($class, $spec, $output) + { + $xfer = 0; + $xfer += $output->writeStructBegin($class); + foreach ($spec as $fid => $fspec) { + $var = $fspec['var']; + if ($this->$var !== null) { + $ftype = $fspec['type']; + $xfer += $output->writeFieldBegin($var, $ftype, $fid); + if (isset(TBase::$tmethod[$ftype])) { + $func = 'write' . TBase::$tmethod[$ftype]; + $xfer += $output->$func($this->$var); + } else { + switch ($ftype) { + case TType::STRUCT: + $xfer += $this->$var->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($this->$var, $fspec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($this->$var, $fspec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($this->$var, $fspec, $output, true); + break; + } + } + $xfer += $output->writeFieldEnd(); + } + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + + return $xfer; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/ClassLoader/ThriftClassLoader.php b/vendor/git.apache.org/thrift.git/lib/php/lib/ClassLoader/ThriftClassLoader.php similarity index 78% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/ClassLoader/ThriftClassLoader.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/ClassLoader/ThriftClassLoader.php index 67575ce18..4361bd84e 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/ClassLoader/ThriftClassLoader.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/ClassLoader/ThriftClassLoader.php @@ -54,7 +54,7 @@ class ThriftClassLoader /** * Set autoloader to use APC cache * @param boolean $apc - * @param string $apc_prefix + * @param string $apc_prefix */ public function __construct($apc = false, $apc_prefix = null) { @@ -65,23 +65,23 @@ class ThriftClassLoader /** * Registers a namespace. * - * @param string $namespace The namespace - * @param array|string $paths The location(s) of the namespace + * @param string $namespace The namespace + * @param array|string $paths The location(s) of the namespace */ public function registerNamespace($namespace, $paths) { - $this->namespaces[$namespace] = (array) $paths; + $this->namespaces[$namespace] = (array)$paths; } /** * Registers a Thrift definition namespace. * - * @param string $namespace The definition namespace - * @param array|string $paths The location(s) of the definition namespace + * @param string $namespace The definition namespace + * @param array|string $paths The location(s) of the definition namespace */ public function registerDefinition($namespace, $paths) { - $this->definitions[$namespace] = (array) $paths; + $this->definitions[$namespace] = (array)$paths; } /** @@ -101,11 +101,9 @@ class ThriftClassLoader */ public function loadClass($class) { - if ( - (true === $this->apc && ($file = $this->findFileInApc($class))) or + if ((true === $this->apc && ($file = $this->findFileInApc($class))) or ($file = $this->findFile($class)) - ) - { + ) { require_once $file; } } @@ -117,8 +115,8 @@ class ThriftClassLoader */ protected function findFileInApc($class) { - if (false === $file = apc_fetch($this->apc_prefix.$class)) { - apc_store($this->apc_prefix.$class, $file = $this->findFile($class)); + if (false === $file = apc_fetch($this->apc_prefix . $class)) { + apc_store($this->apc_prefix . 
$class, $file = $this->findFile($class)); } return $file; @@ -150,10 +148,10 @@ class ThriftClassLoader foreach ($dirs as $dir) { $className = substr($class, $pos + 1); - $file = $dir.DIRECTORY_SEPARATOR. - str_replace('\\', DIRECTORY_SEPARATOR, $namespace). - DIRECTORY_SEPARATOR. - $className.'.php'; + $file = $dir . DIRECTORY_SEPARATOR . + str_replace('\\', DIRECTORY_SEPARATOR, $namespace) . + DIRECTORY_SEPARATOR . + $className . '.php'; if (file_exists($file)) { return $file; @@ -185,20 +183,18 @@ class ThriftClassLoader * Available in service: Interface, Client, Processor, Rest * And every service methods (_.+) */ - if( - 0 === preg_match('#(.+)(if|client|processor|rest)$#i', $class, $n) and + if (0 === preg_match('#(.+)(if|client|processor|rest)$#i', $class, $n) and 0 === preg_match('#(.+)_[a-z0-9]+_(args|result)$#i', $class, $n) - ) - { + ) { $className = 'Types'; } else { $className = $n[1]; } - $file = $dir.DIRECTORY_SEPARATOR . - str_replace('\\', DIRECTORY_SEPARATOR, $namespace) . - DIRECTORY_SEPARATOR . - $className . '.php'; + $file = $dir . DIRECTORY_SEPARATOR . + str_replace('\\', DIRECTORY_SEPARATOR, $namespace) . + DIRECTORY_SEPARATOR . + $className . 
'.php'; if (file_exists($file)) { return $file; diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TApplicationException.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TApplicationException.php new file mode 100644 index 000000000..228add5b1 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TApplicationException.php @@ -0,0 +1,76 @@ + array('var' => 'message', + 'type' => TType::STRING), + 2 => array('var' => 'code', + 'type' => TType::I32)); + + const UNKNOWN = 0; + const UNKNOWN_METHOD = 1; + const INVALID_MESSAGE_TYPE = 2; + const WRONG_METHOD_NAME = 3; + const BAD_SEQUENCE_ID = 4; + const MISSING_RESULT = 5; + const INTERNAL_ERROR = 6; + const PROTOCOL_ERROR = 7; + const INVALID_TRANSFORM = 8; + const INVALID_PROTOCOL = 9; + const UNSUPPORTED_CLIENT_TYPE = 10; + + public function __construct($message = null, $code = 0) + { + parent::__construct($message, $code); + } + + public function read($output) + { + return $this->_read('TApplicationException', self::$_TSPEC, $output); + } + + public function write($output) + { + $xfer = 0; + $xfer += $output->writeStructBegin('TApplicationException'); + if ($message = $this->getMessage()) { + $xfer += $output->writeFieldBegin('message', TType::STRING, 1); + $xfer += $output->writeString($message); + $xfer += $output->writeFieldEnd(); + } + if ($code = $this->getCode()) { + $xfer += $output->writeFieldBegin('type', TType::I32, 2); + $xfer += $output->writeI32($code); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + + return $xfer; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TException.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TException.php new file mode 100644 index 000000000..e9c7c716d --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TException.php @@ -0,0 +1,384 @@ + $fspec) { + $var = $fspec['var']; + if 
(isset($vals[$var])) { + $this->$var = $vals[$var]; + } + } + } else { + parent::__construct($p1, $p2); + } + } + + static public $tmethod = array( + TType::BOOL => 'Bool', + TType::BYTE => 'Byte', + TType::I16 => 'I16', + TType::I32 => 'I32', + TType::I64 => 'I64', + TType::DOUBLE => 'Double', + TType::STRING => 'String' + ); + + private function _readMap(&$var, $spec, $input) + { + $xfer = 0; + $ktype = $spec['ktype']; + $vtype = $spec['vtype']; + $kread = $vread = null; + if (isset(TBase::$tmethod[$ktype])) { + $kread = 'read' . TBase::$tmethod[$ktype]; + } else { + $kspec = $spec['key']; + } + if (isset(TBase::$tmethod[$vtype])) { + $vread = 'read' . TBase::$tmethod[$vtype]; + } else { + $vspec = $spec['val']; + } + $var = array(); + $_ktype = $_vtype = $size = 0; + $xfer += $input->readMapBegin($_ktype, $_vtype, $size); + for ($i = 0; $i < $size; ++$i) { + $key = $val = null; + if ($kread !== null) { + $xfer += $input->$kread($key); + } else { + switch ($ktype) { + case TType::STRUCT: + $class = $kspec['class']; + $key = new $class(); + $xfer += $key->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($key, $kspec, $input); + break; + case TType::LST: + $xfer += $this->_readList($key, $kspec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($key, $kspec, $input, true); + break; + } + } + if ($vread !== null) { + $xfer += $input->$vread($val); + } else { + switch ($vtype) { + case TType::STRUCT: + $class = $vspec['class']; + $val = new $class(); + $xfer += $val->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($val, $vspec, $input); + break; + case TType::LST: + $xfer += $this->_readList($val, $vspec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($val, $vspec, $input, true); + break; + } + } + $var[$key] = $val; + } + $xfer += $input->readMapEnd(); + + return $xfer; + } + + private function _readList(&$var, $spec, $input, $set = false) + { + $xfer = 0; + $etype = 
$spec['etype']; + $eread = $vread = null; + if (isset(TBase::$tmethod[$etype])) { + $eread = 'read' . TBase::$tmethod[$etype]; + } else { + $espec = $spec['elem']; + } + $var = array(); + $_etype = $size = 0; + if ($set) { + $xfer += $input->readSetBegin($_etype, $size); + } else { + $xfer += $input->readListBegin($_etype, $size); + } + for ($i = 0; $i < $size; ++$i) { + $elem = null; + if ($eread !== null) { + $xfer += $input->$eread($elem); + } else { + $espec = $spec['elem']; + switch ($etype) { + case TType::STRUCT: + $class = $espec['class']; + $elem = new $class(); + $xfer += $elem->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($elem, $espec, $input); + break; + case TType::LST: + $xfer += $this->_readList($elem, $espec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($elem, $espec, $input, true); + break; + } + } + if ($set) { + $var[$elem] = true; + } else { + $var [] = $elem; + } + } + if ($set) { + $xfer += $input->readSetEnd(); + } else { + $xfer += $input->readListEnd(); + } + + return $xfer; + } + + protected function _read($class, $spec, $input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + if (isset($spec[$fid])) { + $fspec = $spec[$fid]; + $var = $fspec['var']; + if ($ftype == $fspec['type']) { + $xfer = 0; + if (isset(TBase::$tmethod[$ftype])) { + $func = 'read' . 
TBase::$tmethod[$ftype]; + $xfer += $input->$func($this->$var); + } else { + switch ($ftype) { + case TType::STRUCT: + $class = $fspec['class']; + $this->$var = new $class(); + $xfer += $this->$var->read($input); + break; + case TType::MAP: + $xfer += $this->_readMap($this->$var, $fspec, $input); + break; + case TType::LST: + $xfer += $this->_readList($this->$var, $fspec, $input, false); + break; + case TType::SET: + $xfer += $this->_readList($this->$var, $fspec, $input, true); + break; + } + } + } else { + $xfer += $input->skip($ftype); + } + } else { + $xfer += $input->skip($ftype); + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + + return $xfer; + } + + private function _writeMap($var, $spec, $output) + { + $xfer = 0; + $ktype = $spec['ktype']; + $vtype = $spec['vtype']; + $kwrite = $vwrite = null; + if (isset(TBase::$tmethod[$ktype])) { + $kwrite = 'write' . TBase::$tmethod[$ktype]; + } else { + $kspec = $spec['key']; + } + if (isset(TBase::$tmethod[$vtype])) { + $vwrite = 'write' . 
TBase::$tmethod[$vtype]; + } else { + $vspec = $spec['val']; + } + $xfer += $output->writeMapBegin($ktype, $vtype, count($var)); + foreach ($var as $key => $val) { + if (isset($kwrite)) { + $xfer += $output->$kwrite($key); + } else { + switch ($ktype) { + case TType::STRUCT: + $xfer += $key->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($key, $kspec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($key, $kspec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($key, $kspec, $output, true); + break; + } + } + if (isset($vwrite)) { + $xfer += $output->$vwrite($val); + } else { + switch ($vtype) { + case TType::STRUCT: + $xfer += $val->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($val, $vspec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($val, $vspec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($val, $vspec, $output, true); + break; + } + } + } + $xfer += $output->writeMapEnd(); + + return $xfer; + } + + private function _writeList($var, $spec, $output, $set = false) + { + $xfer = 0; + $etype = $spec['etype']; + $ewrite = null; + if (isset(TBase::$tmethod[$etype])) { + $ewrite = 'write' . TBase::$tmethod[$etype]; + } else { + $espec = $spec['elem']; + } + if ($set) { + $xfer += $output->writeSetBegin($etype, count($var)); + } else { + $xfer += $output->writeListBegin($etype, count($var)); + } + foreach ($var as $key => $val) { + $elem = $set ? 
$key : $val; + if (isset($ewrite)) { + $xfer += $output->$ewrite($elem); + } else { + switch ($etype) { + case TType::STRUCT: + $xfer += $elem->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($elem, $espec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($elem, $espec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($elem, $espec, $output, true); + break; + } + } + } + if ($set) { + $xfer += $output->writeSetEnd(); + } else { + $xfer += $output->writeListEnd(); + } + + return $xfer; + } + + protected function _write($class, $spec, $output) + { + $xfer = 0; + $xfer += $output->writeStructBegin($class); + foreach ($spec as $fid => $fspec) { + $var = $fspec['var']; + if ($this->$var !== null) { + $ftype = $fspec['type']; + $xfer += $output->writeFieldBegin($var, $ftype, $fid); + if (isset(TBase::$tmethod[$ftype])) { + $func = 'write' . TBase::$tmethod[$ftype]; + $xfer += $output->$func($this->$var); + } else { + switch ($ftype) { + case TType::STRUCT: + $xfer += $this->$var->write($output); + break; + case TType::MAP: + $xfer += $this->_writeMap($this->$var, $fspec, $output); + break; + case TType::LST: + $xfer += $this->_writeList($this->$var, $fspec, $output, false); + break; + case TType::SET: + $xfer += $this->_writeList($this->$var, $fspec, $output, true); + break; + } + } + $xfer += $output->writeFieldEnd(); + } + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + + return $xfer; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TProtocolException.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TProtocolException.php similarity index 78% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TProtocolException.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TProtocolException.php index ba7135c29..3a55d45ff 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TProtocolException.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TProtocolException.php @@ -35,16 +35,16 @@ namespace Thrift\Exception; */ class TProtocolException extends TException { - const UNKNOWN = 0; - const INVALID_DATA = 1; - const NEGATIVE_SIZE = 2; - const SIZE_LIMIT = 3; - const BAD_VERSION = 4; - const NOT_IMPLEMENTED = 5; - const DEPTH_LIMIT = 6; + const UNKNOWN = 0; + const INVALID_DATA = 1; + const NEGATIVE_SIZE = 2; + const SIZE_LIMIT = 3; + const BAD_VERSION = 4; + const NOT_IMPLEMENTED = 5; + const DEPTH_LIMIT = 6; - public function __construct($message=null, $code=0) - { - parent::__construct($message, $code); - } + public function __construct($message = null, $code = 0) + { + parent::__construct($message, $code); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TTransportException.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TTransportException.php similarity index 79% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TTransportException.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TTransportException.php index 0074467d6..7d8d56743 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TTransportException.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Exception/TTransportException.php @@ -27,14 +27,14 @@ namespace Thrift\Exception; */ class TTransportException extends TException { - const UNKNOWN = 0; - const NOT_OPEN = 1; - const ALREADY_OPEN = 2; - const TIMED_OUT = 3; - const END_OF_FILE = 4; + const UNKNOWN = 0; + const NOT_OPEN = 1; + const ALREADY_OPEN = 2; + const TIMED_OUT = 3; + const END_OF_FILE = 4; - public function __construct($message=null, $code=0) - { - parent::__construct($message, $code); - } + public function __construct($message = null, $code = 0) + { + parent::__construct($message, $code); + } } diff --git 
a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TBinaryProtocolFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TBinaryProtocolFactory.php similarity index 72% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TBinaryProtocolFactory.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TBinaryProtocolFactory.php index 0c1c4a726..2519183df 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TBinaryProtocolFactory.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TBinaryProtocolFactory.php @@ -29,17 +29,17 @@ use Thrift\Protocol\TBinaryProtocol; */ class TBinaryProtocolFactory implements TProtocolFactory { - private $strictRead_ = false; - private $strictWrite_ = false; + private $strictRead_ = false; + private $strictWrite_ = false; - public function __construct($strictRead=false, $strictWrite=false) - { - $this->strictRead_ = $strictRead; - $this->strictWrite_ = $strictWrite; - } + public function __construct($strictRead = false, $strictWrite = false) + { + $this->strictRead_ = $strictRead; + $this->strictWrite_ = $strictWrite; + } - public function getProtocol($trans) - { - return new TBinaryProtocol($trans, $this->strictRead_, $this->strictWrite_); - } + public function getProtocol($trans) + { + return new TBinaryProtocol($trans, $this->strictRead_, $this->strictWrite_); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TCompactProtocolFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TCompactProtocolFactory.php similarity index 87% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TCompactProtocolFactory.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TCompactProtocolFactory.php index f4b4fe3eb..11fb8ff33 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TCompactProtocolFactory.php +++ 
b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TCompactProtocolFactory.php @@ -29,12 +29,12 @@ use Thrift\Protocol\TCompactProtocol; */ class TCompactProtocolFactory implements TProtocolFactory { - public function __construct() - { - } + public function __construct() + { + } - public function getProtocol($trans) - { - return new TCompactProtocol($trans); - } + public function getProtocol($trans) + { + return new TCompactProtocol($trans); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TJSONProtocolFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TJSONProtocolFactory.php similarity index 100% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TJSONProtocolFactory.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TJSONProtocolFactory.php diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TProtocolFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TProtocolFactory.php similarity index 85% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TProtocolFactory.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TProtocolFactory.php index 4c9562ddb..d3066c8ec 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TProtocolFactory.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TProtocolFactory.php @@ -27,10 +27,10 @@ namespace Thrift\Factory; */ interface TProtocolFactory { - /** - * Build a protocol from the base transport - * - * @return Thrift\Protocol\TProtocol protocol - */ - public function getProtocol($trans); + /** + * Build a protocol from the base transport + * + * @return Thrift\Protocol\TProtocol protocol + */ + public function getProtocol($trans); } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TStringFuncFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TStringFuncFactory.php similarity index 83% rename from 
vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TStringFuncFactory.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TStringFuncFactory.php index 6ad6839c8..30de4d780 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TStringFuncFactory.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TStringFuncFactory.php @@ -21,8 +21,9 @@ namespace Thrift\Factory; -use Thrift\StringFunc\Mbstring; use Thrift\StringFunc\Core; +use Thrift\StringFunc\Mbstring; +use Thrift\StringFunc\TStringFunc; class TStringFuncFactory { @@ -49,17 +50,16 @@ class TStringFuncFactory * Cannot use str* functions for byte counting because multibyte * characters will be read a single bytes. * - * See: http://us.php.net/manual/en/mbstring.overload.php + * See: http://php.net/manual/en/mbstring.overload.php */ if (ini_get('mbstring.func_overload') & 2) { self::$_instance = new Mbstring(); - } - /** - * mbstring is not installed or does not have function overloading - * of the str* functions enabled so use PHP core str* functions for - * byte counting. - */ - else { + } else { + /** + * mbstring is not installed or does not have function overloading + * of the str* functions enabled so use PHP core str* functions for + * byte counting. 
+ */ self::$_instance = new Core(); } } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TTransportFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TTransportFactory.php new file mode 100644 index 000000000..43f2eecde --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Factory/TTransportFactory.php @@ -0,0 +1,18 @@ +isKey; } } - - diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/SimpleJSON/StructContext.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/SimpleJSON/StructContext.php similarity index 93% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/SimpleJSON/StructContext.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/SimpleJSON/StructContext.php index 8162f2bc2..38a62d1a2 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/SimpleJSON/StructContext.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/SimpleJSON/StructContext.php @@ -43,11 +43,10 @@ class StructContext extends Context } else { $this->p_->getTransport()->write( $this->colon_ ? 
- TSimpleJSONProtocol::COLON : - TSimpleJSONProtocol::COMMA + TSimpleJSONProtocol::COLON : + TSimpleJSONProtocol::COMMA ); $this->colon_ = !$this->colon_; } } } - diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocol.php new file mode 100644 index 000000000..cda5c0d4c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocol.php @@ -0,0 +1,453 @@ +strictRead_ = $strictRead; + $this->strictWrite_ = $strictWrite; + } + + public function writeMessageBegin($name, $type, $seqid) + { + if ($this->strictWrite_) { + $version = self::VERSION_1 | $type; + + return + $this->writeI32($version) + + $this->writeString($name) + + $this->writeI32($seqid); + } else { + return + $this->writeString($name) + + $this->writeByte($type) + + $this->writeI32($seqid); + } + } + + public function writeMessageEnd() + { + return 0; + } + + public function writeStructBegin($name) + { + return 0; + } + + public function writeStructEnd() + { + return 0; + } + + public function writeFieldBegin($fieldName, $fieldType, $fieldId) + { + return + $this->writeByte($fieldType) + + $this->writeI16($fieldId); + } + + public function writeFieldEnd() + { + return 0; + } + + public function writeFieldStop() + { + return + $this->writeByte(TType::STOP); + } + + public function writeMapBegin($keyType, $valType, $size) + { + return + $this->writeByte($keyType) + + $this->writeByte($valType) + + $this->writeI32($size); + } + + public function writeMapEnd() + { + return 0; + } + + public function writeListBegin($elemType, $size) + { + return + $this->writeByte($elemType) + + $this->writeI32($size); + } + + public function writeListEnd() + { + return 0; + } + + public function writeSetBegin($elemType, $size) + { + return + $this->writeByte($elemType) + + $this->writeI32($size); + } + + public function writeSetEnd() + { + return 0; + } + + public function writeBool($value) + { + 
$data = pack('c', $value ? 1 : 0); + $this->trans_->write($data, 1); + + return 1; + } + + public function writeByte($value) + { + $data = pack('c', $value); + $this->trans_->write($data, 1); + + return 1; + } + + public function writeI16($value) + { + $data = pack('n', $value); + $this->trans_->write($data, 2); + + return 2; + } + + public function writeI32($value) + { + $data = pack('N', $value); + $this->trans_->write($data, 4); + + return 4; + } + + public function writeI64($value) + { + // If we are on a 32bit architecture we have to explicitly deal with + // 64-bit twos-complement arithmetic since PHP wants to treat all ints + // as signed and any int over 2^31 - 1 as a float + if (PHP_INT_SIZE == 4) { + $neg = $value < 0; + + if ($neg) { + $value *= -1; + } + + $hi = (int)($value / 4294967296); + $lo = (int)$value; + + if ($neg) { + $hi = ~$hi; + $lo = ~$lo; + if (($lo & (int)0xffffffff) == (int)0xffffffff) { + $lo = 0; + $hi++; + } else { + $lo++; + } + } + $data = pack('N2', $hi, $lo); + } else { + $hi = $value >> 32; + $lo = $value & 0xFFFFFFFF; + $data = pack('N2', $hi, $lo); + } + + $this->trans_->write($data, 8); + + return 8; + } + + public function writeDouble($value) + { + $data = pack('d', $value); + $this->trans_->write(strrev($data), 8); + + return 8; + } + + public function writeString($value) + { + $len = TStringFuncFactory::create()->strlen($value); + $result = $this->writeI32($len); + if ($len) { + $this->trans_->write($value, $len); + } + + return $result + $len; + } + + public function readMessageBegin(&$name, &$type, &$seqid) + { + $result = $this->readI32($sz); + if ($sz < 0) { + $version = (int)($sz & self::VERSION_MASK); + if ($version != (int)self::VERSION_1) { + throw new TProtocolException('Bad version identifier: ' . 
$sz, TProtocolException::BAD_VERSION); + } + $type = $sz & 0x000000ff; + $result += + $this->readString($name) + + $this->readI32($seqid); + } else { + if ($this->strictRead_) { + throw new TProtocolException( + 'No version identifier, old protocol client?', + TProtocolException::BAD_VERSION + ); + } else { + // Handle pre-versioned input + $name = $this->trans_->readAll($sz); + $result += + $sz + + $this->readByte($type) + + $this->readI32($seqid); + } + } + + return $result; + } + + public function readMessageEnd() + { + return 0; + } + + public function readStructBegin(&$name) + { + $name = ''; + + return 0; + } + + public function readStructEnd() + { + return 0; + } + + public function readFieldBegin(&$name, &$fieldType, &$fieldId) + { + $result = $this->readByte($fieldType); + if ($fieldType == TType::STOP) { + $fieldId = 0; + + return $result; + } + $result += $this->readI16($fieldId); + + return $result; + } + + public function readFieldEnd() + { + return 0; + } + + public function readMapBegin(&$keyType, &$valType, &$size) + { + return + $this->readByte($keyType) + + $this->readByte($valType) + + $this->readI32($size); + } + + public function readMapEnd() + { + return 0; + } + + public function readListBegin(&$elemType, &$size) + { + return + $this->readByte($elemType) + + $this->readI32($size); + } + + public function readListEnd() + { + return 0; + } + + public function readSetBegin(&$elemType, &$size) + { + return + $this->readByte($elemType) + + $this->readI32($size); + } + + public function readSetEnd() + { + return 0; + } + + public function readBool(&$value) + { + $data = $this->trans_->readAll(1); + $arr = unpack('c', $data); + $value = $arr[1] == 1; + + return 1; + } + + public function readByte(&$value) + { + $data = $this->trans_->readAll(1); + $arr = unpack('c', $data); + $value = $arr[1]; + + return 1; + } + + public function readI16(&$value) + { + $data = $this->trans_->readAll(2); + $arr = unpack('n', $data); + $value = $arr[1]; + if ($value 
> 0x7fff) { + $value = 0 - (($value - 1) ^ 0xffff); + } + + return 2; + } + + public function readI32(&$value) + { + $data = $this->trans_->readAll(4); + $arr = unpack('N', $data); + $value = $arr[1]; + if ($value > 0x7fffffff) { + $value = 0 - (($value - 1) ^ 0xffffffff); + } + + return 4; + } + + public function readI64(&$value) + { + $data = $this->trans_->readAll(8); + + $arr = unpack('N2', $data); + + // If we are on a 32bit architecture we have to explicitly deal with + // 64-bit twos-complement arithmetic since PHP wants to treat all ints + // as signed and any int over 2^31 - 1 as a float + if (PHP_INT_SIZE == 4) { + $hi = $arr[1]; + $lo = $arr[2]; + $isNeg = $hi < 0; + + // Check for a negative + if ($isNeg) { + $hi = ~$hi & (int)0xffffffff; + $lo = ~$lo & (int)0xffffffff; + + if ($lo == (int)0xffffffff) { + $hi++; + $lo = 0; + } else { + $lo++; + } + } + + // Force 32bit words in excess of 2G to pe positive - we deal wigh sign + // explicitly below + + if ($hi & (int)0x80000000) { + $hi &= (int)0x7fffffff; + $hi += 0x80000000; + } + + if ($lo & (int)0x80000000) { + $lo &= (int)0x7fffffff; + $lo += 0x80000000; + } + + $value = $hi * 4294967296 + $lo; + + if ($isNeg) { + $value = 0 - $value; + } + } else { + // Upcast negatives in LSB bit + if ($arr[2] & 0x80000000) { + $arr[2] = $arr[2] & 0xffffffff; + } + + // Check for a negative + if ($arr[1] & 0x80000000) { + $arr[1] = $arr[1] & 0xffffffff; + $arr[1] = $arr[1] ^ 0xffffffff; + $arr[2] = $arr[2] ^ 0xffffffff; + $value = 0 - $arr[1] * 4294967296 - $arr[2] - 1; + } else { + $value = $arr[1] * 4294967296 + $arr[2]; + } + } + + return 8; + } + + public function readDouble(&$value) + { + $data = strrev($this->trans_->readAll(8)); + $arr = unpack('d', $data); + $value = $arr[1]; + + return 8; + } + + public function readString(&$value) + { + $result = $this->readI32($len); + if ($len) { + $value = $this->trans_->readAll($len); + } else { + $value = ''; + } + + return $result + $len; + } +} diff --git 
a/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocolAccelerated.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocolAccelerated.php new file mode 100644 index 000000000..ff799a6ab --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TBinaryProtocolAccelerated.php @@ -0,0 +1,67 @@ +strictRead_; + } + + public function isStrictWrite() + { + return $this->strictWrite_; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TCompactProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TCompactProtocol.php new file mode 100644 index 000000000..1af2a274a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TCompactProtocol.php @@ -0,0 +1,739 @@ + TCompactProtocol::COMPACT_STOP, + TType::BOOL => TCompactProtocol::COMPACT_TRUE, // used for collection + TType::BYTE => TCompactProtocol::COMPACT_BYTE, + TType::I16 => TCompactProtocol::COMPACT_I16, + TType::I32 => TCompactProtocol::COMPACT_I32, + TType::I64 => TCompactProtocol::COMPACT_I64, + TType::DOUBLE => TCompactProtocol::COMPACT_DOUBLE, + TType::STRING => TCompactProtocol::COMPACT_BINARY, + TType::STRUCT => TCompactProtocol::COMPACT_STRUCT, + TType::LST => TCompactProtocol::COMPACT_LIST, + TType::SET => TCompactProtocol::COMPACT_SET, + TType::MAP => TCompactProtocol::COMPACT_MAP, + ); + + protected static $ttypes = array( + TCompactProtocol::COMPACT_STOP => TType::STOP, + TCompactProtocol::COMPACT_TRUE => TType::BOOL, // used for collection + TCompactProtocol::COMPACT_FALSE => TType::BOOL, + TCompactProtocol::COMPACT_BYTE => TType::BYTE, + TCompactProtocol::COMPACT_I16 => TType::I16, + TCompactProtocol::COMPACT_I32 => TType::I32, + TCompactProtocol::COMPACT_I64 => TType::I64, + TCompactProtocol::COMPACT_DOUBLE => TType::DOUBLE, + TCompactProtocol::COMPACT_BINARY => TType::STRING, + TCompactProtocol::COMPACT_STRUCT => TType::STRUCT, + TCompactProtocol::COMPACT_LIST => TType::LST, + 
TCompactProtocol::COMPACT_SET => TType::SET, + TCompactProtocol::COMPACT_MAP => TType::MAP, + ); + + protected $state = TCompactProtocol::STATE_CLEAR; + protected $lastFid = 0; + protected $boolFid = null; + protected $boolValue = null; + protected $structs = array(); + protected $containers = array(); + + // Some varint / zigzag helper methods + public function toZigZag($n, $bits) + { + return ($n << 1) ^ ($n >> ($bits - 1)); + } + + public function fromZigZag($n) + { + return ($n >> 1) ^ -($n & 1); + } + + public function getVarint($data) + { + $out = ""; + while (true) { + if (($data & ~0x7f) === 0) { + $out .= chr($data); + break; + } else { + $out .= chr(($data & 0xff) | 0x80); + $data = $data >> 7; + } + } + + return $out; + } + + public function writeVarint($data) + { + $out = $this->getVarint($data); + $result = TStringFuncFactory::create()->strlen($out); + $this->trans_->write($out, $result); + + return $result; + } + + public function readVarint(&$result) + { + $idx = 0; + $shift = 0; + $result = 0; + while (true) { + $x = $this->trans_->readAll(1); + $arr = unpack('C', $x); + $byte = $arr[1]; + $idx += 1; + $result |= ($byte & 0x7f) << $shift; + if (($byte >> 7) === 0) { + return $idx; + } + $shift += 7; + } + + return $idx; + } + + public function __construct($trans) + { + parent::__construct($trans); + } + + public function writeMessageBegin($name, $type, $seqid) + { + $written = + $this->writeUByte(TCompactProtocol::PROTOCOL_ID) + + $this->writeUByte(TCompactProtocol::VERSION | + ($type << TCompactProtocol::TYPE_SHIFT_AMOUNT)) + + $this->writeVarint($seqid) + + $this->writeString($name); + $this->state = TCompactProtocol::STATE_VALUE_WRITE; + + return $written; + } + + public function writeMessageEnd() + { + $this->state = TCompactProtocol::STATE_CLEAR; + + return 0; + } + + public function writeStructBegin($name) + { + $this->structs[] = array($this->state, $this->lastFid); + $this->state = TCompactProtocol::STATE_FIELD_WRITE; + $this->lastFid = 0; + 
+ return 0; + } + + public function writeStructEnd() + { + $old_values = array_pop($this->structs); + $this->state = $old_values[0]; + $this->lastFid = $old_values[1]; + + return 0; + } + + public function writeFieldStop() + { + return $this->writeByte(0); + } + + public function writeFieldHeader($type, $fid) + { + $written = 0; + $delta = $fid - $this->lastFid; + if (0 < $delta && $delta <= 15) { + $written = $this->writeUByte(($delta << 4) | $type); + } else { + $written = $this->writeByte($type) + + $this->writeI16($fid); + } + $this->lastFid = $fid; + + return $written; + } + + public function writeFieldBegin($field_name, $field_type, $field_id) + { + if ($field_type == TTYPE::BOOL) { + $this->state = TCompactProtocol::STATE_BOOL_WRITE; + $this->boolFid = $field_id; + + return 0; + } else { + $this->state = TCompactProtocol::STATE_VALUE_WRITE; + + return $this->writeFieldHeader(self::$ctypes[$field_type], $field_id); + } + } + + public function writeFieldEnd() + { + $this->state = TCompactProtocol::STATE_FIELD_WRITE; + + return 0; + } + + public function writeCollectionBegin($etype, $size) + { + $written = 0; + if ($size <= 14) { + $written = $this->writeUByte($size << 4 | + self::$ctypes[$etype]); + } else { + $written = $this->writeUByte(0xf0 | + self::$ctypes[$etype]) + + $this->writeVarint($size); + } + $this->containers[] = $this->state; + $this->state = TCompactProtocol::STATE_CONTAINER_WRITE; + + return $written; + } + + public function writeMapBegin($key_type, $val_type, $size) + { + $written = 0; + if ($size == 0) { + $written = $this->writeByte(0); + } else { + $written = $this->writeVarint($size) + + $this->writeUByte(self::$ctypes[$key_type] << 4 | + self::$ctypes[$val_type]); + } + $this->containers[] = $this->state; + + return $written; + } + + public function writeCollectionEnd() + { + $this->state = array_pop($this->containers); + + return 0; + } + + public function writeMapEnd() + { + return $this->writeCollectionEnd(); + } + + public function 
writeListBegin($elem_type, $size) + { + return $this->writeCollectionBegin($elem_type, $size); + } + + public function writeListEnd() + { + return $this->writeCollectionEnd(); + } + + public function writeSetBegin($elem_type, $size) + { + return $this->writeCollectionBegin($elem_type, $size); + } + + public function writeSetEnd() + { + return $this->writeCollectionEnd(); + } + + public function writeBool($value) + { + if ($this->state == TCompactProtocol::STATE_BOOL_WRITE) { + $ctype = TCompactProtocol::COMPACT_FALSE; + if ($value) { + $ctype = TCompactProtocol::COMPACT_TRUE; + } + + return $this->writeFieldHeader($ctype, $this->boolFid); + } elseif ($this->state == TCompactProtocol::STATE_CONTAINER_WRITE) { + return $this->writeByte($value ? 1 : 0); + } else { + throw new TProtocolException('Invalid state in compact protocol'); + } + } + + public function writeByte($value) + { + $data = pack('c', $value); + $this->trans_->write($data, 1); + + return 1; + } + + public function writeUByte($byte) + { + $this->trans_->write(pack('C', $byte), 1); + + return 1; + } + + public function writeI16($value) + { + $thing = $this->toZigZag($value, 16); + + return $this->writeVarint($thing); + } + + public function writeI32($value) + { + $thing = $this->toZigZag($value, 32); + + return $this->writeVarint($thing); + } + + public function writeDouble($value) + { + $data = pack('d', $value); + $this->trans_->write($data, 8); + + return 8; + } + + public function writeString($value) + { + $len = TStringFuncFactory::create()->strlen($value); + $result = $this->writeVarint($len); + if ($len) { + $this->trans_->write($value, $len); + } + + return $result + $len; + } + + public function readFieldBegin(&$name, &$field_type, &$field_id) + { + $result = $this->readUByte($compact_type_and_delta); + + $compact_type = $compact_type_and_delta & 0x0f; + + if ($compact_type == TType::STOP) { + $field_type = $compact_type; + $field_id = 0; + + return $result; + } + $delta = 
$compact_type_and_delta >> 4; + if ($delta == 0) { + $result += $this->readI16($field_id); + } else { + $field_id = $this->lastFid + $delta; + } + $this->lastFid = $field_id; + $field_type = $this->getTType($compact_type); + + if ($compact_type == TCompactProtocol::COMPACT_TRUE) { + $this->state = TCompactProtocol::STATE_BOOL_READ; + $this->boolValue = true; + } elseif ($compact_type == TCompactProtocol::COMPACT_FALSE) { + $this->state = TCompactProtocol::STATE_BOOL_READ; + $this->boolValue = false; + } else { + $this->state = TCompactProtocol::STATE_VALUE_READ; + } + + return $result; + } + + public function readFieldEnd() + { + $this->state = TCompactProtocol::STATE_FIELD_READ; + + return 0; + } + + public function readUByte(&$value) + { + $data = $this->trans_->readAll(1); + $arr = unpack('C', $data); + $value = $arr[1]; + + return 1; + } + + public function readByte(&$value) + { + $data = $this->trans_->readAll(1); + $arr = unpack('c', $data); + $value = $arr[1]; + + return 1; + } + + public function readZigZag(&$value) + { + $result = $this->readVarint($value); + $value = $this->fromZigZag($value); + + return $result; + } + + public function readMessageBegin(&$name, &$type, &$seqid) + { + $protoId = 0; + $result = $this->readUByte($protoId); + if ($protoId != TCompactProtocol::PROTOCOL_ID) { + throw new TProtocolException('Bad protocol id in TCompact message'); + } + $verType = 0; + $result += $this->readUByte($verType); + $type = ($verType >> TCompactProtocol::TYPE_SHIFT_AMOUNT) & TCompactProtocol::TYPE_BITS; + $version = $verType & TCompactProtocol::VERSION_MASK; + if ($version != TCompactProtocol::VERSION) { + throw new TProtocolException('Bad version in TCompact message'); + } + $result += $this->readVarint($seqid); + $result += $this->readString($name); + + return $result; + } + + public function readMessageEnd() + { + return 0; + } + + public function readStructBegin(&$name) + { + $name = ''; // unused + $this->structs[] = array($this->state, 
$this->lastFid); + $this->state = TCompactProtocol::STATE_FIELD_READ; + $this->lastFid = 0; + + return 0; + } + + public function readStructEnd() + { + $last = array_pop($this->structs); + $this->state = $last[0]; + $this->lastFid = $last[1]; + + return 0; + } + + public function readCollectionBegin(&$type, &$size) + { + $sizeType = 0; + $result = $this->readUByte($sizeType); + $size = $sizeType >> 4; + $type = $this->getTType($sizeType); + if ($size == 15) { + $result += $this->readVarint($size); + } + $this->containers[] = $this->state; + $this->state = TCompactProtocol::STATE_CONTAINER_READ; + + return $result; + } + + public function readMapBegin(&$key_type, &$val_type, &$size) + { + $result = $this->readVarint($size); + $types = 0; + if ($size > 0) { + $result += $this->readUByte($types); + } + $val_type = $this->getTType($types); + $key_type = $this->getTType($types >> 4); + $this->containers[] = $this->state; + $this->state = TCompactProtocol::STATE_CONTAINER_READ; + + return $result; + } + + public function readCollectionEnd() + { + $this->state = array_pop($this->containers); + + return 0; + } + + public function readMapEnd() + { + return $this->readCollectionEnd(); + } + + public function readListBegin(&$elem_type, &$size) + { + return $this->readCollectionBegin($elem_type, $size); + } + + public function readListEnd() + { + return $this->readCollectionEnd(); + } + + public function readSetBegin(&$elem_type, &$size) + { + return $this->readCollectionBegin($elem_type, $size); + } + + public function readSetEnd() + { + return $this->readCollectionEnd(); + } + + public function readBool(&$value) + { + if ($this->state == TCompactProtocol::STATE_BOOL_READ) { + $value = $this->boolValue; + + return 0; + } elseif ($this->state == TCompactProtocol::STATE_CONTAINER_READ) { + return $this->readByte($value); + } else { + throw new TProtocolException('Invalid state in compact protocol'); + } + } + + public function readI16(&$value) + { + return 
$this->readZigZag($value); + } + + public function readI32(&$value) + { + return $this->readZigZag($value); + } + + public function readDouble(&$value) + { + $data = $this->trans_->readAll(8); + $arr = unpack('d', $data); + $value = $arr[1]; + + return 8; + } + + public function readString(&$value) + { + $result = $this->readVarint($len); + if ($len) { + $value = $this->trans_->readAll($len); + } else { + $value = ''; + } + + return $result + $len; + } + + public function getTType($byte) + { + return self::$ttypes[$byte & 0x0f]; + } + + // If we are on a 32bit architecture we have to explicitly deal with + // 64-bit twos-complement arithmetic since PHP wants to treat all ints + // as signed and any int over 2^31 - 1 as a float + + // Read and write I64 as two 32 bit numbers $hi and $lo + + public function readI64(&$value) + { + // Read varint from wire + $hi = 0; + $lo = 0; + + $idx = 0; + $shift = 0; + + while (true) { + $x = $this->trans_->readAll(1); + $arr = unpack('C', $x); + $byte = $arr[1]; + $idx += 1; + // Shift hi and lo together. + if ($shift < 28) { + $lo |= (($byte & 0x7f) << $shift); + } elseif ($shift == 28) { + $lo |= (($byte & 0x0f) << 28); + $hi |= (($byte & 0x70) >> 4); + } else { + $hi |= (($byte & 0x7f) << ($shift - 32)); + } + if (($byte >> 7) === 0) { + break; + } + $shift += 7; + } + + // Now, unzig it. 
+ $xorer = 0; + if ($lo & 1) { + $xorer = 0xffffffff; + } + $lo = ($lo >> 1) & 0x7fffffff; + $lo = $lo | (($hi & 1) << 31); + $hi = ($hi >> 1) ^ $xorer; + $lo = $lo ^ $xorer; + + // Now put $hi and $lo back together + $isNeg = $hi < 0 || $hi & 0x80000000; + + // Check for a negative + if ($isNeg) { + $hi = ~$hi & (int)0xffffffff; + $lo = ~$lo & (int)0xffffffff; + + if ($lo == (int)0xffffffff) { + $hi++; + $lo = 0; + } else { + $lo++; + } + } + + // Force 32bit words in excess of 2G to be positive - we deal with sign + // explicitly below + + if ($hi & (int)0x80000000) { + $hi &= (int)0x7fffffff; + $hi += 0x80000000; + } + + if ($lo & (int)0x80000000) { + $lo &= (int)0x7fffffff; + $lo += 0x80000000; + } + + // Create as negative value first, since we can store -2^63 but not 2^63 + $value = -$hi * 4294967296 - $lo; + + if (!$isNeg) { + $value = -$value; + } + + return $idx; + } + + public function writeI64($value) + { + // If we are in an I32 range, use the easy method below. + if (($value > 4294967296) || ($value < -4294967296)) { + // Convert $value to $hi and $lo + $neg = $value < 0; + + if ($neg) { + $value *= -1; + } + + $hi = (int)$value >> 32; + $lo = (int)$value & 0xffffffff; + + if ($neg) { + $hi = ~$hi; + $lo = ~$lo; + if (($lo & (int)0xffffffff) == (int)0xffffffff) { + $lo = 0; + $hi++; + } else { + $lo++; + } + } + + // Now do the zigging and zagging. + $xorer = 0; + if ($neg) { + $xorer = 0xffffffff; + } + $lowbit = ($lo >> 31) & 1; + $hi = ($hi << 1) | $lowbit; + $lo = ($lo << 1); + $lo = ($lo ^ $xorer) & 0xffffffff; + $hi = ($hi ^ $xorer) & 0xffffffff; + + // now write out the varint, ensuring we shift both hi and lo + $out = ""; + while (true) { + if (($lo & ~0x7f) === 0 && + $hi === 0) { + $out .= chr($lo); + break; + } else { + $out .= chr(($lo & 0xff) | 0x80); + $lo = $lo >> 7; + $lo = $lo | ($hi << 25); + $hi = $hi >> 7; + // Right shift carries sign, but we don't want it to. 
+ $hi = $hi & (127 << 25); + } + } + + $ret = TStringFuncFactory::create()->strlen($out); + $this->trans_->write($out, $ret); + + return $ret; + } else { + return $this->writeVarint($this->toZigZag($value, 64)); + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TJSONProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TJSONProtocol.php similarity index 90% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TJSONProtocol.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TJSONProtocol.php index 6d8e81faa..914488421 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TJSONProtocol.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TJSONProtocol.php @@ -217,9 +217,9 @@ class TJSONProtocol extends TProtocol private function hasJSONUnescapedUnicode() { - if (PHP_MAJOR_VERSION > 5 - || (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION >= 4)) + if (PHP_MAJOR_VERSION > 5 || (PHP_MAJOR_VERSION == 5 && PHP_MINOR_VERSION >= 4)) { return true; + } return false; } @@ -237,18 +237,24 @@ class TJSONProtocol extends TProtocol * High surrogate: 0xD800 - 0xDBFF * Low surrogate: 0xDC00 - 0xDFFF */ - $json = preg_replace_callback('/\\\\u(d[89ab][0-9a-f]{2})\\\\u(d[cdef][0-9a-f]{2})/i', + $json = preg_replace_callback( + '/\\\\u(d[89ab][0-9a-f]{2})\\\\u(d[cdef][0-9a-f]{2})/i', function ($matches) { - return mb_convert_encoding(pack('H*', $matches[1].$matches[2]), 'UTF-8', 'UTF-16BE'); - }, $json); + return mb_convert_encoding(pack('H*', $matches[1] . 
$matches[2]), 'UTF-8', 'UTF-16BE'); + }, + $json + ); /* * Unescaped characters within the Basic Multilingual Plane */ - $json = preg_replace_callback('/\\\\u([0-9a-f]{4})/i', + $json = preg_replace_callback( + '/\\\\u([0-9a-f]{4})/i', function ($matches) { return mb_convert_encoding(pack('H*', $matches[1]), 'UTF-8', 'UTF-16BE'); - }, $json); + }, + $json + ); return $json; } @@ -308,54 +314,54 @@ class TJSONProtocol extends TProtocol private function writeJSONObjectStart() { - $this->context_->write(); - $this->trans_->write(self::LBRACE); - $this->pushContext(new PairContext($this)); + $this->context_->write(); + $this->trans_->write(self::LBRACE); + $this->pushContext(new PairContext($this)); } private function writeJSONObjectEnd() { - $this->popContext(); - $this->trans_->write(self::RBRACE); + $this->popContext(); + $this->trans_->write(self::RBRACE); } private function writeJSONArrayStart() { - $this->context_->write(); - $this->trans_->write(self::LBRACKET); - $this->pushContext(new ListContext($this)); + $this->context_->write(); + $this->trans_->write(self::LBRACKET); + $this->pushContext(new ListContext($this)); } private function writeJSONArrayEnd() { - $this->popContext(); - $this->trans_->write(self::RBRACKET); + $this->popContext(); + $this->trans_->write(self::RBRACKET); } private function readJSONString($skipContext) { - if (!$skipContext) { - $this->context_->read(); - } - - $jsonString = ''; - $lastChar = null; - while (true) { - $ch = $this->reader_->read(); - $jsonString .= $ch; - if ($ch == self::QUOTE && - $lastChar !== NULL && - $lastChar !== self::ESCSEQ) { - break; + if (!$skipContext) { + $this->context_->read(); } - if ($ch == self::ESCSEQ && $lastChar == self::ESCSEQ) { - $lastChar = self::DOUBLEESC; - } else { - $lastChar = $ch; - } - } - return json_decode($jsonString); + $jsonString = ''; + $lastChar = null; + while (true) { + $ch = $this->reader_->read(); + $jsonString .= $ch; + if ($ch == self::QUOTE && + $lastChar !== null && + 
$lastChar !== self::ESCSEQ) { + break; + } + if ($ch == self::ESCSEQ && $lastChar == self::ESCSEQ) { + $lastChar = self::DOUBLEESC; + } else { + $lastChar = $ch; + } + } + + return json_decode($jsonString); } private function isJSONNumeric($b) @@ -376,8 +382,8 @@ class TJSONProtocol extends TProtocol case '9': case 'E': case 'e': - return true; - } + return true; + } return false; } @@ -459,8 +465,10 @@ class TJSONProtocol extends TProtocol } elseif ($arr == "Infinity") { return INF; } elseif (!$this->context_->escapeNum()) { - throw new TProtocolException("Numeric data unexpectedly quoted " . $arr, - TProtocolException::INVALID_DATA); + throw new TProtocolException( + "Numeric data unexpectedly quoted " . $arr, + TProtocolException::INVALID_DATA + ); } return floatval($arr); @@ -514,9 +522,9 @@ class TJSONProtocol extends TProtocol /** * Writes the message header * - * @param string $name Function name - * @param int $type message type TMessageType::CALL or TMessageType::REPLY - * @param int $seqid The sequence id of this message + * @param string $name Function name + * @param int $type message type TMessageType::CALL or TMessageType::REPLY + * @param int $seqid The sequence id of this message */ public function writeMessageBegin($name, $type, $seqid) { @@ -538,7 +546,7 @@ class TJSONProtocol extends TProtocol /** * Writes a struct header. 
* - * @param string $name Struct name + * @param string $name Struct name * @throws TException on write error * @return int How many bytes written */ @@ -652,7 +660,7 @@ class TJSONProtocol extends TProtocol * Reads the message header * * @param string $name Function name - * @param int $type message type TMessageType::CALL or TMessageType::REPLY + * @param int $type message type TMessageType::CALL or TMessageType::REPLY * @parem int $seqid The sequence id of this message */ public function readMessageBegin(&$name, &$type, &$seqid) diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TMultiplexedProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TMultiplexedProtocol.php similarity index 100% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TMultiplexedProtocol.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TMultiplexedProtocol.php diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocol.php new file mode 100644 index 000000000..81aceb623 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocol.php @@ -0,0 +1,350 @@ +trans_ = $trans; + } + + /** + * Accessor for transport + * + * @return TTransport + */ + public function getTransport() + { + return $this->trans_; + } + + /** + * Writes the message header + * + * @param string $name Function name + * @param int $type message type TMessageType::CALL or TMessageType::REPLY + * @param int $seqid The sequence id of this message + */ + abstract public function writeMessageBegin($name, $type, $seqid); + + /** + * Close the message + */ + abstract public function writeMessageEnd(); + + /** + * Writes a struct header. + * + * @param string $name Struct name + * @throws TException on write error + * @return int How many bytes written + */ + abstract public function writeStructBegin($name); + + /** + * Close a struct. 
+ * + * @throws TException on write error + * @return int How many bytes written + */ + abstract public function writeStructEnd(); + + /* + * Starts a field. + * + * @param string $name Field name + * @param int $type Field type + * @param int $fid Field id + * @throws TException on write error + * @return int How many bytes written + */ + abstract public function writeFieldBegin($fieldName, $fieldType, $fieldId); + + abstract public function writeFieldEnd(); + + abstract public function writeFieldStop(); + + abstract public function writeMapBegin($keyType, $valType, $size); + + abstract public function writeMapEnd(); + + abstract public function writeListBegin($elemType, $size); + + abstract public function writeListEnd(); + + abstract public function writeSetBegin($elemType, $size); + + abstract public function writeSetEnd(); + + abstract public function writeBool($bool); + + abstract public function writeByte($byte); + + abstract public function writeI16($i16); + + abstract public function writeI32($i32); + + abstract public function writeI64($i64); + + abstract public function writeDouble($dub); + + abstract public function writeString($str); + + /** + * Reads the message header + * + * @param string $name Function name + * @param int $type message type TMessageType::CALL or TMessageType::REPLY + * @parem int $seqid The sequence id of this message + */ + abstract public function readMessageBegin(&$name, &$type, &$seqid); + + /** + * Read the close of message + */ + abstract public function readMessageEnd(); + + abstract public function readStructBegin(&$name); + + abstract public function readStructEnd(); + + abstract public function readFieldBegin(&$name, &$fieldType, &$fieldId); + + abstract public function readFieldEnd(); + + abstract public function readMapBegin(&$keyType, &$valType, &$size); + + abstract public function readMapEnd(); + + abstract public function readListBegin(&$elemType, &$size); + + abstract public function readListEnd(); + + abstract 
public function readSetBegin(&$elemType, &$size); + + abstract public function readSetEnd(); + + abstract public function readBool(&$bool); + + abstract public function readByte(&$byte); + + abstract public function readI16(&$i16); + + abstract public function readI32(&$i32); + + abstract public function readI64(&$i64); + + abstract public function readDouble(&$dub); + + abstract public function readString(&$str); + + /** + * The skip function is a utility to parse over unrecognized date without + * causing corruption. + * + * @param TType $type What type is it + */ + public function skip($type) + { + switch ($type) { + case TType::BOOL: + return $this->readBool($bool); + case TType::BYTE: + return $this->readByte($byte); + case TType::I16: + return $this->readI16($i16); + case TType::I32: + return $this->readI32($i32); + case TType::I64: + return $this->readI64($i64); + case TType::DOUBLE: + return $this->readDouble($dub); + case TType::STRING: + return $this->readString($str); + case TType::STRUCT: + $result = $this->readStructBegin($name); + while (true) { + $result += $this->readFieldBegin($name, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + $result += $this->skip($ftype); + $result += $this->readFieldEnd(); + } + $result += $this->readStructEnd(); + + return $result; + + case TType::MAP: + $result = $this->readMapBegin($keyType, $valType, $size); + for ($i = 0; $i < $size; $i++) { + $result += $this->skip($keyType); + $result += $this->skip($valType); + } + $result += $this->readMapEnd(); + + return $result; + + case TType::SET: + $result = $this->readSetBegin($elemType, $size); + for ($i = 0; $i < $size; $i++) { + $result += $this->skip($elemType); + } + $result += $this->readSetEnd(); + + return $result; + + case TType::LST: + $result = $this->readListBegin($elemType, $size); + for ($i = 0; $i < $size; $i++) { + $result += $this->skip($elemType); + } + $result += $this->readListEnd(); + + return $result; + + default: + throw new 
TProtocolException( + 'Unknown field type: ' . $type, + TProtocolException::INVALID_DATA + ); + } + } + + /** + * Utility for skipping binary data + * + * @param TTransport $itrans TTransport object + * @param int $type Field type + */ + public static function skipBinary($itrans, $type) + { + switch ($type) { + case TType::BOOL: + return $itrans->readAll(1); + case TType::BYTE: + return $itrans->readAll(1); + case TType::I16: + return $itrans->readAll(2); + case TType::I32: + return $itrans->readAll(4); + case TType::I64: + return $itrans->readAll(8); + case TType::DOUBLE: + return $itrans->readAll(8); + case TType::STRING: + $len = unpack('N', $itrans->readAll(4)); + $len = $len[1]; + if ($len > 0x7fffffff) { + $len = 0 - (($len - 1) ^ 0xffffffff); + } + + return 4 + $itrans->readAll($len); + + case TType::STRUCT: + $result = 0; + while (true) { + $ftype = 0; + $fid = 0; + $data = $itrans->readAll(1); + $arr = unpack('c', $data); + $ftype = $arr[1]; + if ($ftype == TType::STOP) { + break; + } + // I16 field id + $result += $itrans->readAll(2); + $result += self::skipBinary($itrans, $ftype); + } + + return $result; + + case TType::MAP: + // Ktype + $data = $itrans->readAll(1); + $arr = unpack('c', $data); + $ktype = $arr[1]; + // Vtype + $data = $itrans->readAll(1); + $arr = unpack('c', $data); + $vtype = $arr[1]; + // Size + $data = $itrans->readAll(4); + $arr = unpack('N', $data); + $size = $arr[1]; + if ($size > 0x7fffffff) { + $size = 0 - (($size - 1) ^ 0xffffffff); + } + $result = 6; + for ($i = 0; $i < $size; $i++) { + $result += self::skipBinary($itrans, $ktype); + $result += self::skipBinary($itrans, $vtype); + } + + return $result; + + case TType::SET: + case TType::LST: + // Vtype + $data = $itrans->readAll(1); + $arr = unpack('c', $data); + $vtype = $arr[1]; + // Size + $data = $itrans->readAll(4); + $arr = unpack('N', $data); + $size = $arr[1]; + if ($size > 0x7fffffff) { + $size = 0 - (($size - 1) ^ 0xffffffff); + } + $result = 5; + for ($i = 0; $i < 
$size; $i++) { + $result += self::skipBinary($itrans, $vtype); + } + + return $result; + + default: + throw new TProtocolException( + 'Unknown field type: ' . $type, + TProtocolException::INVALID_DATA + ); + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocolDecorator.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocolDecorator.php similarity index 99% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocolDecorator.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocolDecorator.php index c08c4d505..a85e0b8e5 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocolDecorator.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TProtocolDecorator.php @@ -21,6 +21,7 @@ */ namespace Thrift\Protocol; + use Thrift\Exception\TException; /** diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TSimpleJSONProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TSimpleJSONProtocol.php similarity index 98% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TSimpleJSONProtocol.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TSimpleJSONProtocol.php index 9cf90bdaa..1cf1f6407 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TSimpleJSONProtocol.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Protocol/TSimpleJSONProtocol.php @@ -54,7 +54,8 @@ class TSimpleJSONProtocol extends TProtocol /** * Push a new write context onto the stack. 
*/ - protected function pushWriteContext(Context $c) { + protected function pushWriteContext(Context $c) + { $this->writeContextStack_[] = $this->writeContext_; $this->writeContext_ = $c; } @@ -62,14 +63,16 @@ class TSimpleJSONProtocol extends TProtocol /** * Pop the last write context off the stack */ - protected function popWriteContext() { + protected function popWriteContext() + { $this->writeContext_ = array_pop($this->writeContextStack_); } /** * Used to make sure that we are not encountering a map whose keys are containers */ - protected function assertContextIsNotMapKey($invalidKeyType) { + protected function assertContextIsNotMapKey($invalidKeyType) + { if ($this->writeContext_->isMapKey()) { throw new CollectionMapKeyException( "Cannot serialize a map with keys that are of type " . diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Serializer/TBinarySerializer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Serializer/TBinarySerializer.php new file mode 100644 index 000000000..9d2b14730 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Serializer/TBinarySerializer.php @@ -0,0 +1,87 @@ +getName(), + TMessageType::REPLY, + $object, + 0, + $protocol->isStrictWrite() + ); + + $protocol->readMessageBegin($unused_name, $unused_type, $unused_seqid); + } else { + $object->write($protocol); + } + $protocol->getTransport()->flush(); + + return $transport->getBuffer(); + } + + public static function deserialize($string_object, $class_name, $buffer_size = 8192) + { + $transport = new TMemoryBuffer(); + $protocol = new TBinaryProtocolAccelerated($transport); + if (function_exists('thrift_protocol_read_binary')) { + // NOTE (t.heintz) TBinaryProtocolAccelerated internally wraps our TMemoryBuffer in a + // TBufferedTransport, so we have to retrieve it again or risk losing data when writing + // less than 512 bytes to the transport (see the comment there as well). 
+ // @see THRIFT-1579 + $protocol->writeMessageBegin('', TMessageType::REPLY, 0); + $protocolTransport = $protocol->getTransport(); + $protocolTransport->write($string_object); + $protocolTransport->flush(); + + return thrift_protocol_read_binary($protocol, $class_name, $protocol->isStrictRead(), $buffer_size); + } else { + $transport->write($string_object); + $object = new $class_name(); + $object->read($protocol); + + return $object; + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TForkingServer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TForkingServer.php new file mode 100644 index 000000000..0bb6e9192 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TForkingServer.php @@ -0,0 +1,125 @@ +transport_->listen(); + + while (!$this->stop_) { + try { + $transport = $this->transport_->accept(); + + if ($transport != null) { + $pid = pcntl_fork(); + + if ($pid > 0) { + $this->handleParent($transport, $pid); + } elseif ($pid === 0) { + $this->handleChild($transport); + } else { + throw new TException('Failed to fork'); + } + } + } catch (TTransportException $e) { + } + + $this->collectChildren(); + } + } + + /** + * Code run by the parent + * + * @param TTransport $transport + * @param int $pid + * @return void + */ + private function handleParent(TTransport $transport, $pid) + { + $this->children_[$pid] = $transport; + } + + /** + * Code run by the child. 
+ * + * @param TTransport $transport + * @return void + */ + private function handleChild(TTransport $transport) + { + try { + $inputTransport = $this->inputTransportFactory_->getTransport($transport); + $outputTransport = $this->outputTransportFactory_->getTransport($transport); + $inputProtocol = $this->inputProtocolFactory_->getProtocol($inputTransport); + $outputProtocol = $this->outputProtocolFactory_->getProtocol($outputTransport); + while ($this->processor_->process($inputProtocol, $outputProtocol)) { + } + @$transport->close(); + } catch (TTransportException $e) { + } + + exit(0); + } + + /** + * Collects any children we may have + * + * @return void + */ + private function collectChildren() + { + foreach ($this->children_ as $pid => $transport) { + if (pcntl_waitpid($pid, $status, WNOHANG) > 0) { + unset($this->children_[$pid]); + if ($transport) { + @$transport->close(); + } + } + } + } + + /** + * Stops the server running. Kills the transport + * and then stops the main serving loop + * + * @return void + */ + public function stop() + { + $this->transport_->close(); + $this->stop_ = true; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSSLServerSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSSLServerSocket.php new file mode 100644 index 000000000..ac589b76b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSSLServerSocket.php @@ -0,0 +1,97 @@ +getSSLHost($host); + parent::__construct($ssl_host, $port); + $this->context_ = $context; + } + + public function getSSLHost($host) + { + $transport_protocol_loc = strpos($host, "://"); + if ($transport_protocol_loc === false) { + $host = 'ssl://' . $host; + } + return $host; + } + + /** + * Opens a new socket server handle + * + * @return void + */ + public function listen() + { + $this->listener_ = @stream_socket_server( + $this->host_ . ':' . 
$this->port_, + $errno, + $errstr, + STREAM_SERVER_BIND | STREAM_SERVER_LISTEN, + $this->context_ + ); + } + + /** + * Implementation of accept. If not client is accepted in the given time + * + * @return TSocket + */ + protected function acceptImpl() + { + $handle = @stream_socket_accept($this->listener_, $this->acceptTimeout_ / 1000.0); + if (!$handle) { + return null; + } + + $socket = new TSSLSocket(); + $socket->setHandle($handle); + + return $socket; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServer.php new file mode 100644 index 000000000..268c37820 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServer.php @@ -0,0 +1,102 @@ +processor_ = $processor; + $this->transport_ = $transport; + $this->inputTransportFactory_ = $inputTransportFactory; + $this->outputTransportFactory_ = $outputTransportFactory; + $this->inputProtocolFactory_ = $inputProtocolFactory; + $this->outputProtocolFactory_ = $outputProtocolFactory; + } + + /** + * Serves the server. 
This should never return + * unless a problem permits it to do so or it + * is interrupted intentionally + * + * @abstract + * @return void + */ + abstract public function serve(); + + /** + * Stops the server serving + * + * @abstract + * @return void + */ + abstract public function stop(); +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerSocket.php new file mode 100644 index 000000000..8f38fb23f --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerSocket.php @@ -0,0 +1,124 @@ +host_ = $host; + $this->port_ = $port; + } + + /** + * Sets the accept timeout + * + * @param int $acceptTimeout + * @return void + */ + public function setAcceptTimeout($acceptTimeout) + { + $this->acceptTimeout_ = $acceptTimeout; + } + + /** + * Opens a new socket server handle + * + * @return void + */ + public function listen() + { + $this->listener_ = stream_socket_server('tcp://' . $this->host_ . ':' . $this->port_); + } + + /** + * Closes the socket server handle + * + * @return void + */ + public function close() + { + @fclose($this->listener_); + $this->listener_ = null; + } + + /** + * Implementation of accept. 
If not client is accepted in the given time + * + * @return TSocket + */ + protected function acceptImpl() + { + $handle = @stream_socket_accept($this->listener_, $this->acceptTimeout_ / 1000.0); + if (!$handle) { + return null; + } + + $socket = new TSocket(); + $socket->setHandle($handle); + + return $socket; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerTransport.php new file mode 100644 index 000000000..15a27afa8 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TServerTransport.php @@ -0,0 +1,56 @@ +acceptImpl(); + + if ($transport == null) { + throw new TTransportException("accept() may not return NULL"); + } + + return $transport; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSimpleServer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSimpleServer.php new file mode 100644 index 000000000..4c1dda5a5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Server/TSimpleServer.php @@ -0,0 +1,60 @@ +transport_->listen(); + + while (!$this->stop_) { + try { + $transport = $this->transport_->accept(); + + if ($transport != null) { + $inputTransport = $this->inputTransportFactory_->getTransport($transport); + $outputTransport = $this->outputTransportFactory_->getTransport($transport); + $inputProtocol = $this->inputProtocolFactory_->getProtocol($inputTransport); + $outputProtocol = $this->outputProtocolFactory_->getProtocol($outputTransport); + while ($this->processor_->process($inputProtocol, $outputProtocol)) { + } + } + } catch (TTransportException $e) { + } + } + } + + /** + * Stops the server running. 
Kills the transport + * and then stops the main serving loop + * + * @return void + */ + public function stop() + { + $this->transport_->close(); + $this->stop_ = true; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/StoredMessageProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/StoredMessageProtocol.php new file mode 100644 index 000000000..c4aaaa9ec --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/StoredMessageProtocol.php @@ -0,0 +1,53 @@ +fname_ = $fname; + $this->mtype_ = $mtype; + $this->rseqid_ = $rseqid; + } + + public function readMessageBegin(&$name, &$type, &$seqid) + { + $name = $this->fname_; + $type = $this->mtype_; + $seqid = $this->rseqid_; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/StringFunc/Core.php b/vendor/git.apache.org/thrift.git/lib/php/lib/StringFunc/Core.php similarity index 100% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/StringFunc/Core.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/StringFunc/Core.php diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/StringFunc/Mbstring.php b/vendor/git.apache.org/thrift.git/lib/php/lib/StringFunc/Mbstring.php similarity index 100% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/StringFunc/Mbstring.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/StringFunc/Mbstring.php diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/StringFunc/TStringFunc.php b/vendor/git.apache.org/thrift.git/lib/php/lib/StringFunc/TStringFunc.php similarity index 100% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/StringFunc/TStringFunc.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/StringFunc/TStringFunc.php diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/TMultiplexedProcessor.php b/vendor/git.apache.org/thrift.git/lib/php/lib/TMultiplexedProcessor.php similarity index 84% rename from 
vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/TMultiplexedProcessor.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/TMultiplexedProcessor.php index 138f95bc4..a64a9687c 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/TMultiplexedProcessor.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/TMultiplexedProcessor.php @@ -25,7 +25,6 @@ namespace Thrift; use Thrift\Exception\TException; use Thrift\Protocol\TProtocol; use Thrift\Protocol\TMultiplexedProtocol; -use Thrift\Protocol\TProtocolDecorator; use Thrift\Type\TMessageType; /** @@ -112,32 +111,8 @@ class TMultiplexedProcessor $processor = $this->serviceProcessorMap_[$serviceName]; return $processor->process( - new StoredMessageProtocol($input, $messageName, $mtype, $rseqid), $output + new StoredMessageProtocol($input, $messageName, $mtype, $rseqid), + $output ); } } - -/** - * Our goal was to work with any protocol. In order to do that, we needed - * to allow them to call readMessageBegin() and get the Message in exactly - * the standard format, without the service name prepended to the Message name. 
- */ -class StoredMessageProtocol extends TProtocolDecorator -{ - private $fname_, $mtype_, $rseqid_; - - public function __construct(TProtocol $protocol, $fname, $mtype, $rseqid) - { - parent::__construct($protocol); - $this->fname_ = $fname; - $this->mtype_ = $mtype; - $this->rseqid_ = $rseqid; - } - - public function readMessageBegin(&$name, &$type, &$seqid) - { - $name = $this->fname_; - $type = $this->mtype_; - $seqid = $this->rseqid_; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Base/TBase.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Base/TBase.php deleted file mode 100644 index 4195f75df..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Base/TBase.php +++ /dev/null @@ -1,380 +0,0 @@ - 'Bool', - TType::BYTE => 'Byte', - TType::I16 => 'I16', - TType::I32 => 'I32', - TType::I64 => 'I64', - TType::DOUBLE => 'Double', - TType::STRING => 'String'); - - abstract public function read($input); - - abstract public function write($output); - - public function __construct($spec=null, $vals=null) - { - if (is_array($spec) && is_array($vals)) { - foreach ($spec as $fid => $fspec) { - $var = $fspec['var']; - if (isset($vals[$var])) { - $this->$var = $vals[$var]; - } - } - } - } - - public function __wakeup() - { - $this->__construct(get_object_vars($this)); - } - - private function _readMap(&$var, $spec, $input) - { - $xfer = 0; - $ktype = $spec['ktype']; - $vtype = $spec['vtype']; - $kread = $vread = null; - if (isset(TBase::$tmethod[$ktype])) { - $kread = 'read'.TBase::$tmethod[$ktype]; - } else { - $kspec = $spec['key']; - } - if (isset(TBase::$tmethod[$vtype])) { - $vread = 'read'.TBase::$tmethod[$vtype]; - } else { - $vspec = $spec['val']; - } - $var = array(); - $_ktype = $_vtype = $size = 0; - $xfer += $input->readMapBegin($_ktype, $_vtype, $size); - for ($i = 0; $i < $size; ++$i) { - $key = $val = null; - if ($kread !== null) { - $xfer += $input->$kread($key); - } else { - switch ($ktype) { - case 
TType::STRUCT: - $class = $kspec['class']; - $key = new $class(); - $xfer += $key->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($key, $kspec, $input); - break; - case TType::LST: - $xfer += $this->_readList($key, $kspec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($key, $kspec, $input, true); - break; - } - } - if ($vread !== null) { - $xfer += $input->$vread($val); - } else { - switch ($vtype) { - case TType::STRUCT: - $class = $vspec['class']; - $val = new $class(); - $xfer += $val->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($val, $vspec, $input); - break; - case TType::LST: - $xfer += $this->_readList($val, $vspec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($val, $vspec, $input, true); - break; - } - } - $var[$key] = $val; - } - $xfer += $input->readMapEnd(); - - return $xfer; - } - - private function _readList(&$var, $spec, $input, $set=false) - { - $xfer = 0; - $etype = $spec['etype']; - $eread = $vread = null; - if (isset(TBase::$tmethod[$etype])) { - $eread = 'read'.TBase::$tmethod[$etype]; - } else { - $espec = $spec['elem']; - } - $var = array(); - $_etype = $size = 0; - if ($set) { - $xfer += $input->readSetBegin($_etype, $size); - } else { - $xfer += $input->readListBegin($_etype, $size); - } - for ($i = 0; $i < $size; ++$i) { - $elem = null; - if ($eread !== null) { - $xfer += $input->$eread($elem); - } else { - $espec = $spec['elem']; - switch ($etype) { - case TType::STRUCT: - $class = $espec['class']; - $elem = new $class(); - $xfer += $elem->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($elem, $espec, $input); - break; - case TType::LST: - $xfer += $this->_readList($elem, $espec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($elem, $espec, $input, true); - break; - } - } - if ($set) { - $var[$elem] = true; - } else { - $var []= $elem; - } - } - if ($set) { - $xfer += $input->readSetEnd(); - } 
else { - $xfer += $input->readListEnd(); - } - - return $xfer; - } - - protected function _read($class, $spec, $input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - if (isset($spec[$fid])) { - $fspec = $spec[$fid]; - $var = $fspec['var']; - if ($ftype == $fspec['type']) { - $xfer = 0; - if (isset(TBase::$tmethod[$ftype])) { - $func = 'read'.TBase::$tmethod[$ftype]; - $xfer += $input->$func($this->$var); - } else { - switch ($ftype) { - case TType::STRUCT: - $class = $fspec['class']; - $this->$var = new $class(); - $xfer += $this->$var->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($this->$var, $fspec, $input); - break; - case TType::LST: - $xfer += $this->_readList($this->$var, $fspec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($this->$var, $fspec, $input, true); - break; - } - } - } else { - $xfer += $input->skip($ftype); - } - } else { - $xfer += $input->skip($ftype); - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - - return $xfer; - } - - private function _writeMap($var, $spec, $output) - { - $xfer = 0; - $ktype = $spec['ktype']; - $vtype = $spec['vtype']; - $kwrite = $vwrite = null; - if (isset(TBase::$tmethod[$ktype])) { - $kwrite = 'write'.TBase::$tmethod[$ktype]; - } else { - $kspec = $spec['key']; - } - if (isset(TBase::$tmethod[$vtype])) { - $vwrite = 'write'.TBase::$tmethod[$vtype]; - } else { - $vspec = $spec['val']; - } - $xfer += $output->writeMapBegin($ktype, $vtype, count($var)); - foreach ($var as $key => $val) { - if (isset($kwrite)) { - $xfer += $output->$kwrite($key); - } else { - switch ($ktype) { - case TType::STRUCT: - $xfer += $key->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($key, $kspec, $output); - break; - case TType::LST: - $xfer += 
$this->_writeList($key, $kspec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($key, $kspec, $output, true); - break; - } - } - if (isset($vwrite)) { - $xfer += $output->$vwrite($val); - } else { - switch ($vtype) { - case TType::STRUCT: - $xfer += $val->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($val, $vspec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($val, $vspec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($val, $vspec, $output, true); - break; - } - } - } - $xfer += $output->writeMapEnd(); - - return $xfer; - } - - private function _writeList($var, $spec, $output, $set=false) - { - $xfer = 0; - $etype = $spec['etype']; - $ewrite = null; - if (isset(TBase::$tmethod[$etype])) { - $ewrite = 'write'.TBase::$tmethod[$etype]; - } else { - $espec = $spec['elem']; - } - if ($set) { - $xfer += $output->writeSetBegin($etype, count($var)); - } else { - $xfer += $output->writeListBegin($etype, count($var)); - } - foreach ($var as $key => $val) { - $elem = $set ? 
$key : $val; - if (isset($ewrite)) { - $xfer += $output->$ewrite($elem); - } else { - switch ($etype) { - case TType::STRUCT: - $xfer += $elem->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($elem, $espec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($elem, $espec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($elem, $espec, $output, true); - break; - } - } - } - if ($set) { - $xfer += $output->writeSetEnd(); - } else { - $xfer += $output->writeListEnd(); - } - - return $xfer; - } - - protected function _write($class, $spec, $output) - { - $xfer = 0; - $xfer += $output->writeStructBegin($class); - foreach ($spec as $fid => $fspec) { - $var = $fspec['var']; - if ($this->$var !== null) { - $ftype = $fspec['type']; - $xfer += $output->writeFieldBegin($var, $ftype, $fid); - if (isset(TBase::$tmethod[$ftype])) { - $func = 'write'.TBase::$tmethod[$ftype]; - $xfer += $output->$func($this->$var); - } else { - switch ($ftype) { - case TType::STRUCT: - $xfer += $this->$var->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($this->$var, $fspec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($this->$var, $fspec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($this->$var, $fspec, $output, true); - break; - } - } - $xfer += $output->writeFieldEnd(); - } - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - - return $xfer; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TApplicationException.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TApplicationException.php deleted file mode 100644 index b1689fc34..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TApplicationException.php +++ /dev/null @@ -1,76 +0,0 @@ - array('var' => 'message', - 'type' => TType::STRING), - 2 => array('var' => 'code', - 'type' => TType::I32)); - - const 
UNKNOWN = 0; - const UNKNOWN_METHOD = 1; - const INVALID_MESSAGE_TYPE = 2; - const WRONG_METHOD_NAME = 3; - const BAD_SEQUENCE_ID = 4; - const MISSING_RESULT = 5; - const INTERNAL_ERROR = 6; - const PROTOCOL_ERROR = 7; - const INVALID_TRANSFORM = 8; - const INVALID_PROTOCOL = 9; - const UNSUPPORTED_CLIENT_TYPE = 10; - - public function __construct($message=null, $code=0) - { - parent::__construct($message, $code); - } - - public function read($output) - { - return $this->_read('TApplicationException', self::$_TSPEC, $output); - } - - public function write($output) - { - $xfer = 0; - $xfer += $output->writeStructBegin('TApplicationException'); - if ($message = $this->getMessage()) { - $xfer += $output->writeFieldBegin('message', TType::STRING, 1); - $xfer += $output->writeString($message); - $xfer += $output->writeFieldEnd(); - } - if ($code = $this->getCode()) { - $xfer += $output->writeFieldBegin('type', TType::I32, 2); - $xfer += $output->writeI32($code); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - - return $xfer; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TException.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TException.php deleted file mode 100644 index 5c0684384..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Exception/TException.php +++ /dev/null @@ -1,383 +0,0 @@ - $fspec) { - $var = $fspec['var']; - if (isset($vals[$var])) { - $this->$var = $vals[$var]; - } - } - } else { - parent::__construct($p1, $p2); - } - } - - static $tmethod = array(TType::BOOL => 'Bool', - TType::BYTE => 'Byte', - TType::I16 => 'I16', - TType::I32 => 'I32', - TType::I64 => 'I64', - TType::DOUBLE => 'Double', - TType::STRING => 'String'); - - private function _readMap(&$var, $spec, $input) - { - $xfer = 0; - $ktype = $spec['ktype']; - $vtype = $spec['vtype']; - $kread = $vread = null; - if (isset(TBase::$tmethod[$ktype])) { - 
$kread = 'read'.TBase::$tmethod[$ktype]; - } else { - $kspec = $spec['key']; - } - if (isset(TBase::$tmethod[$vtype])) { - $vread = 'read'.TBase::$tmethod[$vtype]; - } else { - $vspec = $spec['val']; - } - $var = array(); - $_ktype = $_vtype = $size = 0; - $xfer += $input->readMapBegin($_ktype, $_vtype, $size); - for ($i = 0; $i < $size; ++$i) { - $key = $val = null; - if ($kread !== null) { - $xfer += $input->$kread($key); - } else { - switch ($ktype) { - case TType::STRUCT: - $class = $kspec['class']; - $key = new $class(); - $xfer += $key->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($key, $kspec, $input); - break; - case TType::LST: - $xfer += $this->_readList($key, $kspec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($key, $kspec, $input, true); - break; - } - } - if ($vread !== null) { - $xfer += $input->$vread($val); - } else { - switch ($vtype) { - case TType::STRUCT: - $class = $vspec['class']; - $val = new $class(); - $xfer += $val->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($val, $vspec, $input); - break; - case TType::LST: - $xfer += $this->_readList($val, $vspec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($val, $vspec, $input, true); - break; - } - } - $var[$key] = $val; - } - $xfer += $input->readMapEnd(); - - return $xfer; - } - - private function _readList(&$var, $spec, $input, $set=false) - { - $xfer = 0; - $etype = $spec['etype']; - $eread = $vread = null; - if (isset(TBase::$tmethod[$etype])) { - $eread = 'read'.TBase::$tmethod[$etype]; - } else { - $espec = $spec['elem']; - } - $var = array(); - $_etype = $size = 0; - if ($set) { - $xfer += $input->readSetBegin($_etype, $size); - } else { - $xfer += $input->readListBegin($_etype, $size); - } - for ($i = 0; $i < $size; ++$i) { - $elem = null; - if ($eread !== null) { - $xfer += $input->$eread($elem); - } else { - $espec = $spec['elem']; - switch ($etype) { - case TType::STRUCT: - $class = 
$espec['class']; - $elem = new $class(); - $xfer += $elem->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($elem, $espec, $input); - break; - case TType::LST: - $xfer += $this->_readList($elem, $espec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($elem, $espec, $input, true); - break; - } - } - if ($set) { - $var[$elem] = true; - } else { - $var []= $elem; - } - } - if ($set) { - $xfer += $input->readSetEnd(); - } else { - $xfer += $input->readListEnd(); - } - - return $xfer; - } - - protected function _read($class, $spec, $input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - if (isset($spec[$fid])) { - $fspec = $spec[$fid]; - $var = $fspec['var']; - if ($ftype == $fspec['type']) { - $xfer = 0; - if (isset(TBase::$tmethod[$ftype])) { - $func = 'read'.TBase::$tmethod[$ftype]; - $xfer += $input->$func($this->$var); - } else { - switch ($ftype) { - case TType::STRUCT: - $class = $fspec['class']; - $this->$var = new $class(); - $xfer += $this->$var->read($input); - break; - case TType::MAP: - $xfer += $this->_readMap($this->$var, $fspec, $input); - break; - case TType::LST: - $xfer += $this->_readList($this->$var, $fspec, $input, false); - break; - case TType::SET: - $xfer += $this->_readList($this->$var, $fspec, $input, true); - break; - } - } - } else { - $xfer += $input->skip($ftype); - } - } else { - $xfer += $input->skip($ftype); - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - - return $xfer; - } - - private function _writeMap($var, $spec, $output) - { - $xfer = 0; - $ktype = $spec['ktype']; - $vtype = $spec['vtype']; - $kwrite = $vwrite = null; - if (isset(TBase::$tmethod[$ktype])) { - $kwrite = 'write'.TBase::$tmethod[$ktype]; - } else { - $kspec = $spec['key']; - } - if (isset(TBase::$tmethod[$vtype])) { - 
$vwrite = 'write'.TBase::$tmethod[$vtype]; - } else { - $vspec = $spec['val']; - } - $xfer += $output->writeMapBegin($ktype, $vtype, count($var)); - foreach ($var as $key => $val) { - if (isset($kwrite)) { - $xfer += $output->$kwrite($key); - } else { - switch ($ktype) { - case TType::STRUCT: - $xfer += $key->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($key, $kspec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($key, $kspec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($key, $kspec, $output, true); - break; - } - } - if (isset($vwrite)) { - $xfer += $output->$vwrite($val); - } else { - switch ($vtype) { - case TType::STRUCT: - $xfer += $val->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($val, $vspec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($val, $vspec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($val, $vspec, $output, true); - break; - } - } - } - $xfer += $output->writeMapEnd(); - - return $xfer; - } - - private function _writeList($var, $spec, $output, $set=false) - { - $xfer = 0; - $etype = $spec['etype']; - $ewrite = null; - if (isset(TBase::$tmethod[$etype])) { - $ewrite = 'write'.TBase::$tmethod[$etype]; - } else { - $espec = $spec['elem']; - } - if ($set) { - $xfer += $output->writeSetBegin($etype, count($var)); - } else { - $xfer += $output->writeListBegin($etype, count($var)); - } - foreach ($var as $key => $val) { - $elem = $set ? 
$key : $val; - if (isset($ewrite)) { - $xfer += $output->$ewrite($elem); - } else { - switch ($etype) { - case TType::STRUCT: - $xfer += $elem->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($elem, $espec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($elem, $espec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($elem, $espec, $output, true); - break; - } - } - } - if ($set) { - $xfer += $output->writeSetEnd(); - } else { - $xfer += $output->writeListEnd(); - } - - return $xfer; - } - - protected function _write($class, $spec, $output) - { - $xfer = 0; - $xfer += $output->writeStructBegin($class); - foreach ($spec as $fid => $fspec) { - $var = $fspec['var']; - if ($this->$var !== null) { - $ftype = $fspec['type']; - $xfer += $output->writeFieldBegin($var, $ftype, $fid); - if (isset(TBase::$tmethod[$ftype])) { - $func = 'write'.TBase::$tmethod[$ftype]; - $xfer += $output->$func($this->$var); - } else { - switch ($ftype) { - case TType::STRUCT: - $xfer += $this->$var->write($output); - break; - case TType::MAP: - $xfer += $this->_writeMap($this->$var, $fspec, $output); - break; - case TType::LST: - $xfer += $this->_writeList($this->$var, $fspec, $output, false); - break; - case TType::SET: - $xfer += $this->_writeList($this->$var, $fspec, $output, true); - break; - } - } - $xfer += $output->writeFieldEnd(); - } - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - - return $xfer; - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TTransportFactory.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TTransportFactory.php deleted file mode 100644 index b32b5f414..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Factory/TTransportFactory.php +++ /dev/null @@ -1,18 +0,0 @@ -strictRead_ = $strictRead; - $this->strictWrite_ = $strictWrite; - } - - public function writeMessageBegin($name, $type, $seqid) - { - if 
($this->strictWrite_) { - $version = self::VERSION_1 | $type; - - return - $this->writeI32($version) + - $this->writeString($name) + - $this->writeI32($seqid); - } else { - return - $this->writeString($name) + - $this->writeByte($type) + - $this->writeI32($seqid); - } - } - - public function writeMessageEnd() - { - return 0; - } - - public function writeStructBegin($name) - { - return 0; - } - - public function writeStructEnd() - { - return 0; - } - - public function writeFieldBegin($fieldName, $fieldType, $fieldId) - { - return - $this->writeByte($fieldType) + - $this->writeI16($fieldId); - } - - public function writeFieldEnd() - { - return 0; - } - - public function writeFieldStop() - { - return - $this->writeByte(TType::STOP); - } - - public function writeMapBegin($keyType, $valType, $size) - { - return - $this->writeByte($keyType) + - $this->writeByte($valType) + - $this->writeI32($size); - } - - public function writeMapEnd() - { - return 0; - } - - public function writeListBegin($elemType, $size) - { - return - $this->writeByte($elemType) + - $this->writeI32($size); - } - - public function writeListEnd() - { - return 0; - } - - public function writeSetBegin($elemType, $size) - { - return - $this->writeByte($elemType) + - $this->writeI32($size); - } - - public function writeSetEnd() - { - return 0; - } - - public function writeBool($value) - { - $data = pack('c', $value ? 
1 : 0); - $this->trans_->write($data, 1); - - return 1; - } - - public function writeByte($value) - { - $data = pack('c', $value); - $this->trans_->write($data, 1); - - return 1; - } - - public function writeI16($value) - { - $data = pack('n', $value); - $this->trans_->write($data, 2); - - return 2; - } - - public function writeI32($value) - { - $data = pack('N', $value); - $this->trans_->write($data, 4); - - return 4; - } - - public function writeI64($value) - { - // If we are on a 32bit architecture we have to explicitly deal with - // 64-bit twos-complement arithmetic since PHP wants to treat all ints - // as signed and any int over 2^31 - 1 as a float - if (PHP_INT_SIZE == 4) { - $neg = $value < 0; - - if ($neg) { - $value *= -1; - } - - $hi = (int) ($value / 4294967296); - $lo = (int) $value; - - if ($neg) { - $hi = ~$hi; - $lo = ~$lo; - if (($lo & (int) 0xffffffff) == (int) 0xffffffff) { - $lo = 0; - $hi++; - } else { - $lo++; - } - } - $data = pack('N2', $hi, $lo); - - } else { - $hi = $value >> 32; - $lo = $value & 0xFFFFFFFF; - $data = pack('N2', $hi, $lo); - } - - $this->trans_->write($data, 8); - - return 8; - } - - public function writeDouble($value) - { - $data = pack('d', $value); - $this->trans_->write(strrev($data), 8); - - return 8; - } - - public function writeString($value) - { - $len = TStringFuncFactory::create()->strlen($value); - $result = $this->writeI32($len); - if ($len) { - $this->trans_->write($value, $len); - } - - return $result + $len; - } - - public function readMessageBegin(&$name, &$type, &$seqid) - { - $result = $this->readI32($sz); - if ($sz < 0) { - $version = (int) ($sz & self::VERSION_MASK); - if ($version != (int) self::VERSION_1) { - throw new TProtocolException('Bad version identifier: '.$sz, TProtocolException::BAD_VERSION); - } - $type = $sz & 0x000000ff; - $result += - $this->readString($name) + - $this->readI32($seqid); - } else { - if ($this->strictRead_) { - throw new TProtocolException('No version identifier, old 
protocol client?', TProtocolException::BAD_VERSION); - } else { - // Handle pre-versioned input - $name = $this->trans_->readAll($sz); - $result += - $sz + - $this->readByte($type) + - $this->readI32($seqid); - } - } - - return $result; - } - - public function readMessageEnd() - { - return 0; - } - - public function readStructBegin(&$name) - { - $name = ''; - - return 0; - } - - public function readStructEnd() - { - return 0; - } - - public function readFieldBegin(&$name, &$fieldType, &$fieldId) - { - $result = $this->readByte($fieldType); - if ($fieldType == TType::STOP) { - $fieldId = 0; - - return $result; - } - $result += $this->readI16($fieldId); - - return $result; - } - - public function readFieldEnd() - { - return 0; - } - - public function readMapBegin(&$keyType, &$valType, &$size) - { - return - $this->readByte($keyType) + - $this->readByte($valType) + - $this->readI32($size); - } - - public function readMapEnd() - { - return 0; - } - - public function readListBegin(&$elemType, &$size) - { - return - $this->readByte($elemType) + - $this->readI32($size); - } - - public function readListEnd() - { - return 0; - } - - public function readSetBegin(&$elemType, &$size) - { - return - $this->readByte($elemType) + - $this->readI32($size); - } - - public function readSetEnd() - { - return 0; - } - - public function readBool(&$value) - { - $data = $this->trans_->readAll(1); - $arr = unpack('c', $data); - $value = $arr[1] == 1; - - return 1; - } - - public function readByte(&$value) - { - $data = $this->trans_->readAll(1); - $arr = unpack('c', $data); - $value = $arr[1]; - - return 1; - } - - public function readI16(&$value) - { - $data = $this->trans_->readAll(2); - $arr = unpack('n', $data); - $value = $arr[1]; - if ($value > 0x7fff) { - $value = 0 - (($value - 1) ^ 0xffff); - } - - return 2; - } - - public function readI32(&$value) - { - $data = $this->trans_->readAll(4); - $arr = unpack('N', $data); - $value = $arr[1]; - if ($value > 0x7fffffff) { - $value = 0 - 
(($value - 1) ^ 0xffffffff); - } - - return 4; - } - - public function readI64(&$value) - { - $data = $this->trans_->readAll(8); - - $arr = unpack('N2', $data); - - // If we are on a 32bit architecture we have to explicitly deal with - // 64-bit twos-complement arithmetic since PHP wants to treat all ints - // as signed and any int over 2^31 - 1 as a float - if (PHP_INT_SIZE == 4) { - - $hi = $arr[1]; - $lo = $arr[2]; - $isNeg = $hi < 0; - - // Check for a negative - if ($isNeg) { - $hi = ~$hi & (int) 0xffffffff; - $lo = ~$lo & (int) 0xffffffff; - - if ($lo == (int) 0xffffffff) { - $hi++; - $lo = 0; - } else { - $lo++; - } - } - - // Force 32bit words in excess of 2G to pe positive - we deal wigh sign - // explicitly below - - if ($hi & (int) 0x80000000) { - $hi &= (int) 0x7fffffff; - $hi += 0x80000000; - } - - if ($lo & (int) 0x80000000) { - $lo &= (int) 0x7fffffff; - $lo += 0x80000000; - } - - $value = $hi * 4294967296 + $lo; - - if ($isNeg) { - $value = 0 - $value; - } - } else { - - // Upcast negatives in LSB bit - if ($arr[2] & 0x80000000) { - $arr[2] = $arr[2] & 0xffffffff; - } - - // Check for a negative - if ($arr[1] & 0x80000000) { - $arr[1] = $arr[1] & 0xffffffff; - $arr[1] = $arr[1] ^ 0xffffffff; - $arr[2] = $arr[2] ^ 0xffffffff; - $value = 0 - $arr[1]*4294967296 - $arr[2] - 1; - } else { - $value = $arr[1]*4294967296 + $arr[2]; - } - } - - return 8; - } - - public function readDouble(&$value) - { - $data = strrev($this->trans_->readAll(8)); - $arr = unpack('d', $data); - $value = $arr[1]; - - return 8; - } - - public function readString(&$value) - { - $result = $this->readI32($len); - if ($len) { - $value = $this->trans_->readAll($len); - } else { - $value = ''; - } - - return $result + $len; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TBinaryProtocolAccelerated.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TBinaryProtocolAccelerated.php deleted file mode 100644 index f0e0bb99f..000000000 --- 
a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TBinaryProtocolAccelerated.php +++ /dev/null @@ -1,65 +0,0 @@ -strictRead_; - } - public function isStrictWrite() - { - return $this->strictWrite_; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TCompactProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TCompactProtocol.php deleted file mode 100644 index c25b0501b..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TCompactProtocol.php +++ /dev/null @@ -1,739 +0,0 @@ - TCompactProtocol::COMPACT_STOP, - TType::BOOL => TCompactProtocol::COMPACT_TRUE, // used for collection - TType::BYTE => TCompactProtocol::COMPACT_BYTE, - TType::I16 => TCompactProtocol::COMPACT_I16, - TType::I32 => TCompactProtocol::COMPACT_I32, - TType::I64 => TCompactProtocol::COMPACT_I64, - TType::DOUBLE => TCompactProtocol::COMPACT_DOUBLE, - TType::STRING => TCompactProtocol::COMPACT_BINARY, - TType::STRUCT => TCompactProtocol::COMPACT_STRUCT, - TType::LST => TCompactProtocol::COMPACT_LIST, - TType::SET => TCompactProtocol::COMPACT_SET, - TType::MAP => TCompactProtocol::COMPACT_MAP, - ); - - protected static $ttypes = array( - TCompactProtocol::COMPACT_STOP => TType::STOP , - TCompactProtocol::COMPACT_TRUE => TType::BOOL, // used for collection - TCompactProtocol::COMPACT_FALSE => TType::BOOL, - TCompactProtocol::COMPACT_BYTE => TType::BYTE, - TCompactProtocol::COMPACT_I16 => TType::I16, - TCompactProtocol::COMPACT_I32 => TType::I32, - TCompactProtocol::COMPACT_I64 => TType::I64, - TCompactProtocol::COMPACT_DOUBLE => TType::DOUBLE, - TCompactProtocol::COMPACT_BINARY => TType::STRING, - TCompactProtocol::COMPACT_STRUCT => TType::STRUCT, - TCompactProtocol::COMPACT_LIST => TType::LST, - TCompactProtocol::COMPACT_SET => TType::SET, - TCompactProtocol::COMPACT_MAP => TType::MAP, - ); - - protected $state = TCompactProtocol::STATE_CLEAR; - protected $lastFid = 0; - protected $boolFid = null; - protected 
$boolValue = null; - protected $structs = array(); - protected $containers = array(); - - // Some varint / zigzag helper methods - public function toZigZag($n, $bits) - { - return ($n << 1) ^ ($n >> ($bits - 1)); - } - - public function fromZigZag($n) - { - return ($n >> 1) ^ -($n & 1); - } - - public function getVarint($data) - { - $out = ""; - while (true) { - if (($data & ~0x7f) === 0) { - $out .= chr($data); - break; - } else { - $out .= chr(($data & 0xff) | 0x80); - $data = $data >> 7; - } - } - - return $out; - } - - public function writeVarint($data) - { - $out = $this->getVarint($data); - $result = TStringFuncFactory::create()->strlen($out); - $this->trans_->write($out, $result); - - return $result; - } - - public function readVarint(&$result) - { - $idx = 0; - $shift = 0; - $result = 0; - while (true) { - $x = $this->trans_->readAll(1); - $arr = unpack('C', $x); - $byte = $arr[1]; - $idx += 1; - $result |= ($byte & 0x7f) << $shift; - if (($byte >> 7) === 0) { - return $idx; - } - $shift += 7; - } - - return $idx; - } - - public function __construct($trans) - { - parent::__construct($trans); - } - - public function writeMessageBegin($name, $type, $seqid) - { - $written = - $this->writeUByte(TCompactProtocol::PROTOCOL_ID) + - $this->writeUByte(TCompactProtocol::VERSION | - ($type << TCompactProtocol::TYPE_SHIFT_AMOUNT)) + - $this->writeVarint($seqid) + - $this->writeString($name); - $this->state = TCompactProtocol::STATE_VALUE_WRITE; - - return $written; - } - - public function writeMessageEnd() - { - $this->state = TCompactProtocol::STATE_CLEAR; - - return 0; - } - - public function writeStructBegin($name) - { - $this->structs[] = array($this->state, $this->lastFid); - $this->state = TCompactProtocol::STATE_FIELD_WRITE; - $this->lastFid = 0; - - return 0; - } - - public function writeStructEnd() - { - $old_values = array_pop($this->structs); - $this->state = $old_values[0]; - $this->lastFid = $old_values[1]; - - return 0; - } - - public function 
writeFieldStop() - { - return $this->writeByte(0); - } - - public function writeFieldHeader($type, $fid) - { - $written = 0; - $delta = $fid - $this->lastFid; - if (0 < $delta && $delta <= 15) { - $written = $this->writeUByte(($delta << 4) | $type); - } else { - $written = $this->writeByte($type) + - $this->writeI16($fid); - } - $this->lastFid = $fid; - - return $written; - } - - public function writeFieldBegin($field_name, $field_type, $field_id) - { - if ($field_type == TTYPE::BOOL) { - $this->state = TCompactProtocol::STATE_BOOL_WRITE; - $this->boolFid = $field_id; - - return 0; - } else { - $this->state = TCompactProtocol::STATE_VALUE_WRITE; - - return $this->writeFieldHeader(self::$ctypes[$field_type], $field_id); - } - } - - public function writeFieldEnd() - { - $this->state = TCompactProtocol::STATE_FIELD_WRITE; - - return 0; - } - - public function writeCollectionBegin($etype, $size) - { - $written = 0; - if ($size <= 14) { - $written = $this->writeUByte($size << 4 | - self::$ctypes[$etype]); - } else { - $written = $this->writeUByte(0xf0 | - self::$ctypes[$etype]) + - $this->writeVarint($size); - } - $this->containers[] = $this->state; - $this->state = TCompactProtocol::STATE_CONTAINER_WRITE; - - return $written; - } - - public function writeMapBegin($key_type, $val_type, $size) - { - $written = 0; - if ($size == 0) { - $written = $this->writeByte(0); - } else { - $written = $this->writeVarint($size) + - $this->writeUByte(self::$ctypes[$key_type] << 4 | - self::$ctypes[$val_type]); - } - $this->containers[] = $this->state; - - return $written; - } - - public function writeCollectionEnd() - { - $this->state = array_pop($this->containers); - - return 0; - } - - public function writeMapEnd() - { - return $this->writeCollectionEnd(); - } - - public function writeListBegin($elem_type, $size) - { - return $this->writeCollectionBegin($elem_type, $size); - } - - public function writeListEnd() - { - return $this->writeCollectionEnd(); - } - - public function 
writeSetBegin($elem_type, $size) - { - return $this->writeCollectionBegin($elem_type, $size); - } - - public function writeSetEnd() - { - return $this->writeCollectionEnd(); - } - - public function writeBool($value) - { - if ($this->state == TCompactProtocol::STATE_BOOL_WRITE) { - $ctype = TCompactProtocol::COMPACT_FALSE; - if ($value) { - $ctype = TCompactProtocol::COMPACT_TRUE; - } - - return $this->writeFieldHeader($ctype, $this->boolFid); - } elseif ($this->state == TCompactProtocol::STATE_CONTAINER_WRITE) { - return $this->writeByte($value ? 1 : 0); - } else { - throw new TProtocolException('Invalid state in compact protocol'); - } - } - - public function writeByte($value) - { - $data = pack('c', $value); - $this->trans_->write($data, 1); - - return 1; - } - - public function writeUByte($byte) - { - $this->trans_->write(pack('C', $byte), 1); - - return 1; - } - - public function writeI16($value) - { - $thing = $this->toZigZag($value, 16); - - return $this->writeVarint($thing); - } - - public function writeI32($value) - { - $thing = $this->toZigZag($value, 32); - - return $this->writeVarint($thing); - } - - public function writeDouble($value) - { - $data = pack('d', $value); - $this->trans_->write($data, 8); - - return 8; - } - - public function writeString($value) - { - $len = TStringFuncFactory::create()->strlen($value); - $result = $this->writeVarint($len); - if ($len) { - $this->trans_->write($value, $len); - } - - return $result + $len; - } - - public function readFieldBegin(&$name, &$field_type, &$field_id) - { - $result = $this->readUByte($compact_type_and_delta); - - $compact_type = $compact_type_and_delta & 0x0f; - - if ($compact_type == TType::STOP) { - $field_type = $compact_type; - $field_id = 0; - - return $result; - } - $delta = $compact_type_and_delta >> 4; - if ($delta == 0) { - $result += $this->readI16($field_id); - } else { - $field_id = $this->lastFid + $delta; - } - $this->lastFid = $field_id; - $field_type = $this->getTType($compact_type); 
- - if ($compact_type == TCompactProtocol::COMPACT_TRUE) { - $this->state = TCompactProtocol::STATE_BOOL_READ; - $this->boolValue = true; - } elseif ($compact_type == TCompactProtocol::COMPACT_FALSE) { - $this->state = TCompactProtocol::STATE_BOOL_READ; - $this->boolValue = false; - } else { - $this->state = TCompactProtocol::STATE_VALUE_READ; - } - - return $result; - } - - public function readFieldEnd() - { - $this->state = TCompactProtocol::STATE_FIELD_READ; - - return 0; - } - - public function readUByte(&$value) - { - $data = $this->trans_->readAll(1); - $arr = unpack('C', $data); - $value = $arr[1]; - - return 1; - } - - public function readByte(&$value) - { - $data = $this->trans_->readAll(1); - $arr = unpack('c', $data); - $value = $arr[1]; - - return 1; - } - - public function readZigZag(&$value) - { - $result = $this->readVarint($value); - $value = $this->fromZigZag($value); - - return $result; - } - - public function readMessageBegin(&$name, &$type, &$seqid) - { - $protoId = 0; - $result = $this->readUByte($protoId); - if ($protoId != TCompactProtocol::PROTOCOL_ID) { - throw new TProtocolException('Bad protocol id in TCompact message'); - } - $verType = 0; - $result += $this->readUByte($verType); - $type = ($verType >> TCompactProtocol::TYPE_SHIFT_AMOUNT) & TCompactProtocol::TYPE_BITS; - $version = $verType & TCompactProtocol::VERSION_MASK; - if ($version != TCompactProtocol::VERSION) { - throw new TProtocolException('Bad version in TCompact message'); - } - $result += $this->readVarint($seqid); - $result += $this->readString($name); - - return $result; - } - - public function readMessageEnd() - { - return 0; - } - - public function readStructBegin(&$name) - { - $name = ''; // unused - $this->structs[] = array($this->state, $this->lastFid); - $this->state = TCompactProtocol::STATE_FIELD_READ; - $this->lastFid = 0; - - return 0; - } - - public function readStructEnd() - { - $last = array_pop($this->structs); - $this->state = $last[0]; - $this->lastFid = 
$last[1]; - - return 0; - } - - public function readCollectionBegin(&$type, &$size) - { - $sizeType = 0; - $result = $this->readUByte($sizeType); - $size = $sizeType >> 4; - $type = $this->getTType($sizeType); - if ($size == 15) { - $result += $this->readVarint($size); - } - $this->containers[] = $this->state; - $this->state = TCompactProtocol::STATE_CONTAINER_READ; - - return $result; - } - - public function readMapBegin(&$key_type, &$val_type, &$size) - { - $result = $this->readVarint($size); - $types = 0; - if ($size > 0) { - $result += $this->readUByte($types); - } - $val_type = $this->getTType($types); - $key_type = $this->getTType($types >> 4); - $this->containers[] = $this->state; - $this->state = TCompactProtocol::STATE_CONTAINER_READ; - - return $result; - } - - public function readCollectionEnd() - { - $this->state = array_pop($this->containers); - - return 0; - } - - public function readMapEnd() - { - return $this->readCollectionEnd(); - } - - public function readListBegin(&$elem_type, &$size) - { - return $this->readCollectionBegin($elem_type, $size); - } - - public function readListEnd() - { - return $this->readCollectionEnd(); - } - - public function readSetBegin(&$elem_type, &$size) - { - return $this->readCollectionBegin($elem_type, $size); - } - - public function readSetEnd() - { - return $this->readCollectionEnd(); - } - - public function readBool(&$value) - { - if ($this->state == TCompactProtocol::STATE_BOOL_READ) { - $value = $this->boolValue; - - return 0; - } elseif ($this->state == TCompactProtocol::STATE_CONTAINER_READ) { - return $this->readByte($value); - } else { - throw new TProtocolException('Invalid state in compact protocol'); - } - } - - public function readI16(&$value) - { - return $this->readZigZag($value); - } - - public function readI32(&$value) - { - return $this->readZigZag($value); - } - - public function readDouble(&$value) - { - $data = $this->trans_->readAll(8); - $arr = unpack('d', $data); - $value = $arr[1]; - - return 
8; - } - - public function readString(&$value) - { - $result = $this->readVarint($len); - if ($len) { - $value = $this->trans_->readAll($len); - } else { - $value = ''; - } - - return $result + $len; - } - - public function getTType($byte) - { - return self::$ttypes[$byte & 0x0f]; - } - - // If we are on a 32bit architecture we have to explicitly deal with - // 64-bit twos-complement arithmetic since PHP wants to treat all ints - // as signed and any int over 2^31 - 1 as a float - - // Read and write I64 as two 32 bit numbers $hi and $lo - - public function readI64(&$value) - { - // Read varint from wire - $hi = 0; - $lo = 0; - - $idx = 0; - $shift = 0; - - while (true) { - $x = $this->trans_->readAll(1); - $arr = unpack('C', $x); - $byte = $arr[1]; - $idx += 1; - // Shift hi and lo together. - if ($shift < 28) { - $lo |= (($byte & 0x7f) << $shift); - } elseif ($shift == 28) { - $lo |= (($byte & 0x0f) << 28); - $hi |= (($byte & 0x70) >> 4); - } else { - $hi |= (($byte & 0x7f) << ($shift - 32)); - } - if (($byte >> 7) === 0) { - break; - } - $shift += 7; - } - - // Now, unzig it. 
- $xorer = 0; - if ($lo & 1) { - $xorer = 0xffffffff; - } - $lo = ($lo >> 1) & 0x7fffffff; - $lo = $lo | (($hi & 1) << 31); - $hi = ($hi >> 1) ^ $xorer; - $lo = $lo ^ $xorer; - - // Now put $hi and $lo back together - $isNeg = $hi < 0 || $hi & 0x80000000; - - // Check for a negative - if ($isNeg) { - $hi = ~$hi & (int) 0xffffffff; - $lo = ~$lo & (int) 0xffffffff; - - if ($lo == (int) 0xffffffff) { - $hi++; - $lo = 0; - } else { - $lo++; - } - } - - // Force 32bit words in excess of 2G to be positive - we deal with sign - // explicitly below - - if ($hi & (int) 0x80000000) { - $hi &= (int) 0x7fffffff; - $hi += 0x80000000; - } - - if ($lo & (int) 0x80000000) { - $lo &= (int) 0x7fffffff; - $lo += 0x80000000; - } - - // Create as negative value first, since we can store -2^63 but not 2^63 - $value = -$hi * 4294967296 - $lo; - - if (!$isNeg) { - $value = -$value; - } - - return $idx; - } - - public function writeI64($value) - { - // If we are in an I32 range, use the easy method below. - if (($value > 4294967296) || ($value < -4294967296)) { - // Convert $value to $hi and $lo - $neg = $value < 0; - - if ($neg) { - $value *= -1; - } - - $hi = (int) $value >> 32; - $lo = (int) $value & 0xffffffff; - - if ($neg) { - $hi = ~$hi; - $lo = ~$lo; - if (($lo & (int) 0xffffffff) == (int) 0xffffffff) { - $lo = 0; - $hi++; - } else { - $lo++; - } - } - - // Now do the zigging and zagging. - $xorer = 0; - if ($neg) { - $xorer = 0xffffffff; - } - $lowbit = ($lo >> 31) & 1; - $hi = ($hi << 1) | $lowbit; - $lo = ($lo << 1); - $lo = ($lo ^ $xorer) & 0xffffffff; - $hi = ($hi ^ $xorer) & 0xffffffff; - - // now write out the varint, ensuring we shift both hi and lo - $out = ""; - while (true) { - if (($lo & ~0x7f) === 0 && - $hi === 0) { - $out .= chr($lo); - break; - } else { - $out .= chr(($lo & 0xff) | 0x80); - $lo = $lo >> 7; - $lo = $lo | ($hi << 25); - $hi = $hi >> 7; - // Right shift carries sign, but we don't want it to. 
- $hi = $hi & (127 << 25); - } - } - - $ret = TStringFuncFactory::create()->strlen($out); - $this->trans_->write($out, $ret); - - return $ret; - } else { - return $this->writeVarint($this->toZigZag($value, 64)); - } - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocol.php deleted file mode 100644 index 0e3bc0d0b..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Protocol/TProtocol.php +++ /dev/null @@ -1,352 +0,0 @@ -trans_ = $trans; - } - - /** - * Accessor for transport - * - * @return TTransport - */ - public function getTransport() - { - return $this->trans_; - } - - /** - * Writes the message header - * - * @param string $name Function name - * @param int $type message type TMessageType::CALL or TMessageType::REPLY - * @param int $seqid The sequence id of this message - */ - abstract public function writeMessageBegin($name, $type, $seqid); - - /** - * Close the message - */ - abstract public function writeMessageEnd(); - - /** - * Writes a struct header. - * - * @param string $name Struct name - * @throws TException on write error - * @return int How many bytes written - */ - abstract public function writeStructBegin($name); - - /** - * Close a struct. - * - * @throws TException on write error - * @return int How many bytes written - */ - abstract public function writeStructEnd(); - - /* - * Starts a field. 
- * - * @param string $name Field name - * @param int $type Field type - * @param int $fid Field id - * @throws TException on write error - * @return int How many bytes written - */ - abstract public function writeFieldBegin($fieldName, $fieldType, $fieldId); - - abstract public function writeFieldEnd(); - - abstract public function writeFieldStop(); - - abstract public function writeMapBegin($keyType, $valType, $size); - - abstract public function writeMapEnd(); - - abstract public function writeListBegin($elemType, $size); - - abstract public function writeListEnd(); - - abstract public function writeSetBegin($elemType, $size); - - abstract public function writeSetEnd(); - - abstract public function writeBool($bool); - - abstract public function writeByte($byte); - - abstract public function writeI16($i16); - - abstract public function writeI32($i32); - - abstract public function writeI64($i64); - - abstract public function writeDouble($dub); - - abstract public function writeString($str); - - /** - * Reads the message header - * - * @param string $name Function name - * @param int $type message type TMessageType::CALL or TMessageType::REPLY - * @parem int $seqid The sequence id of this message - */ - abstract public function readMessageBegin(&$name, &$type, &$seqid); - - /** - * Read the close of message - */ - abstract public function readMessageEnd(); - - abstract public function readStructBegin(&$name); - - abstract public function readStructEnd(); - - abstract public function readFieldBegin(&$name, &$fieldType, &$fieldId); - - abstract public function readFieldEnd(); - - abstract public function readMapBegin(&$keyType, &$valType, &$size); - - abstract public function readMapEnd(); - - abstract public function readListBegin(&$elemType, &$size); - - abstract public function readListEnd(); - - abstract public function readSetBegin(&$elemType, &$size); - - abstract public function readSetEnd(); - - abstract public function readBool(&$bool); - - abstract public 
function readByte(&$byte); - - abstract public function readI16(&$i16); - - abstract public function readI32(&$i32); - - abstract public function readI64(&$i64); - - abstract public function readDouble(&$dub); - - abstract public function readString(&$str); - - /** - * The skip function is a utility to parse over unrecognized date without - * causing corruption. - * - * @param TType $type What type is it - */ - public function skip($type) - { - switch ($type) { - case TType::BOOL: - return $this->readBool($bool); - case TType::BYTE: - return $this->readByte($byte); - case TType::I16: - return $this->readI16($i16); - case TType::I32: - return $this->readI32($i32); - case TType::I64: - return $this->readI64($i64); - case TType::DOUBLE: - return $this->readDouble($dub); - case TType::STRING: - return $this->readString($str); - case TType::STRUCT: - { - $result = $this->readStructBegin($name); - while (true) { - $result += $this->readFieldBegin($name, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - $result += $this->skip($ftype); - $result += $this->readFieldEnd(); - } - $result += $this->readStructEnd(); - - return $result; - } - case TType::MAP: - { - $result = $this->readMapBegin($keyType, $valType, $size); - for ($i = 0; $i < $size; $i++) { - $result += $this->skip($keyType); - $result += $this->skip($valType); - } - $result += $this->readMapEnd(); - - return $result; - } - case TType::SET: - { - $result = $this->readSetBegin($elemType, $size); - for ($i = 0; $i < $size; $i++) { - $result += $this->skip($elemType); - } - $result += $this->readSetEnd(); - - return $result; - } - case TType::LST: - { - $result = $this->readListBegin($elemType, $size); - for ($i = 0; $i < $size; $i++) { - $result += $this->skip($elemType); - } - $result += $this->readListEnd(); - - return $result; - } - default: - throw new TProtocolException('Unknown field type: '.$type, - TProtocolException::INVALID_DATA); - } - } - - /** - * Utility for skipping binary data - * - * 
@param TTransport $itrans TTransport object - * @param int $type Field type - */ - public static function skipBinary($itrans, $type) - { - switch ($type) { - case TType::BOOL: - return $itrans->readAll(1); - case TType::BYTE: - return $itrans->readAll(1); - case TType::I16: - return $itrans->readAll(2); - case TType::I32: - return $itrans->readAll(4); - case TType::I64: - return $itrans->readAll(8); - case TType::DOUBLE: - return $itrans->readAll(8); - case TType::STRING: - $len = unpack('N', $itrans->readAll(4)); - $len = $len[1]; - if ($len > 0x7fffffff) { - $len = 0 - (($len - 1) ^ 0xffffffff); - } - - return 4 + $itrans->readAll($len); - case TType::STRUCT: - { - $result = 0; - while (true) { - $ftype = 0; - $fid = 0; - $data = $itrans->readAll(1); - $arr = unpack('c', $data); - $ftype = $arr[1]; - if ($ftype == TType::STOP) { - break; - } - // I16 field id - $result += $itrans->readAll(2); - $result += self::skipBinary($itrans, $ftype); - } - - return $result; - } - case TType::MAP: - { - // Ktype - $data = $itrans->readAll(1); - $arr = unpack('c', $data); - $ktype = $arr[1]; - // Vtype - $data = $itrans->readAll(1); - $arr = unpack('c', $data); - $vtype = $arr[1]; - // Size - $data = $itrans->readAll(4); - $arr = unpack('N', $data); - $size = $arr[1]; - if ($size > 0x7fffffff) { - $size = 0 - (($size - 1) ^ 0xffffffff); - } - $result = 6; - for ($i = 0; $i < $size; $i++) { - $result += self::skipBinary($itrans, $ktype); - $result += self::skipBinary($itrans, $vtype); - } - - return $result; - } - case TType::SET: - case TType::LST: - { - // Vtype - $data = $itrans->readAll(1); - $arr = unpack('c', $data); - $vtype = $arr[1]; - // Size - $data = $itrans->readAll(4); - $arr = unpack('N', $data); - $size = $arr[1]; - if ($size > 0x7fffffff) { - $size = 0 - (($size - 1) ^ 0xffffffff); - } - $result = 5; - for ($i = 0; $i < $size; $i++) { - $result += self::skipBinary($itrans, $vtype); - } - - return $result; - } - default: - throw new TProtocolException('Unknown 
field type: '.$type, - TProtocolException::INVALID_DATA); - } - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Serializer/TBinarySerializer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Serializer/TBinarySerializer.php deleted file mode 100644 index aa2f71b40..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Serializer/TBinarySerializer.php +++ /dev/null @@ -1,85 +0,0 @@ -getName(), - TMessageType::REPLY, $object, - 0, $protocol->isStrictWrite()); - - $protocol->readMessageBegin($unused_name, $unused_type, - $unused_seqid); - } else { - $object->write($protocol); - } - $protocol->getTransport()->flush(); - - return $transport->getBuffer(); - } - - public static function deserialize($string_object, $class_name, $buffer_size = 8192) - { - $transport = new TMemoryBuffer(); - $protocol = new TBinaryProtocolAccelerated($transport); - if (function_exists('thrift_protocol_read_binary')) { - // NOTE (t.heintz) TBinaryProtocolAccelerated internally wraps our TMemoryBuffer in a - // TBufferedTransport, so we have to retrieve it again or risk losing data when writing - // less than 512 bytes to the transport (see the comment there as well). 
- // @see THRIFT-1579 - $protocol->writeMessageBegin('', TMessageType::REPLY, 0); - $protocolTransport = $protocol->getTransport(); - $protocolTransport->write($string_object); - $protocolTransport->flush(); - - return thrift_protocol_read_binary($protocol, $class_name, - $protocol->isStrictRead(), - $buffer_size); - } else { - $transport->write($string_object); - $object = new $class_name(); - $object->read($protocol); - - return $object; - } - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TForkingServer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TForkingServer.php deleted file mode 100644 index 7f6e541cd..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TForkingServer.php +++ /dev/null @@ -1,120 +0,0 @@ -transport_->listen(); - - while (!$this->stop_) { - try { - $transport = $this->transport_->accept(); - - if ($transport != null) { - $pid = pcntl_fork(); - - if ($pid > 0) { - $this->handleParent($transport, $pid); - } elseif ($pid === 0) { - $this->handleChild($transport); - } else { - throw new TException('Failed to fork'); - } - } - } catch (TTransportException $e) { } - - $this->collectChildren(); - } - } - - /** - * Code run by the parent - * - * @param TTransport $transport - * @param int $pid - * @return void - */ - private function handleParent(TTransport $transport, $pid) - { - $this->children_[$pid] = $transport; - } - - /** - * Code run by the child. 
- * - * @param TTransport $transport - * @return void - */ - private function handleChild(TTransport $transport) - { - try { - $inputTransport = $this->inputTransportFactory_->getTransport($transport); - $outputTransport = $this->outputTransportFactory_->getTransport($transport); - $inputProtocol = $this->inputProtocolFactory_->getProtocol($inputTransport); - $outputProtocol = $this->outputProtocolFactory_->getProtocol($outputTransport); - while ($this->processor_->process($inputProtocol, $outputProtocol)) { } - @$transport->close(); - } catch (TTransportException $e) { } - - exit(0); - } - - /** - * Collects any children we may have - * - * @return void - */ - private function collectChildren() - { - foreach ($this->children_ as $pid => $transport) { - if (pcntl_waitpid($pid, $status, WNOHANG) > 0) { - unset($this->children_[$pid]); - if ($transport) @$transport->close(); - } - } - } - - /** - * Stops the server running. Kills the transport - * and then stops the main serving loop - * - * @return void - */ - public function stop() - { - $this->transport_->close(); - $this->stop_ = true; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSSLServerSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSSLServerSocket.php deleted file mode 100644 index dfc470430..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSSLServerSocket.php +++ /dev/null @@ -1,94 +0,0 @@ -getSSLHost($host); - parent::__construct($ssl_host, $port); - $this->context_ = $context; - } - - public function getSSLHost($host) - { - $transport_protocol_loc = strpos($host, "://"); - if ($transport_protocol_loc === false) { - $host = 'ssl://'.$host; - } - return $host; - } - - /** - * Opens a new socket server handle - * - * @return void - */ - public function listen() - { - $this->listener_ = @stream_socket_server( - $this->host_ . ':' . 
$this->port_, - $errno, - $errstr, - STREAM_SERVER_BIND|STREAM_SERVER_LISTEN, - $this->context_); - } - - /** - * Implementation of accept. If not client is accepted in the given time - * - * @return TSocket - */ - protected function acceptImpl() - { - $handle = @stream_socket_accept($this->listener_, $this->acceptTimeout_ / 1000.0); - if(!$handle) return null; - - $socket = new TSSLSocket(); - $socket->setHandle($handle); - - return $socket; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServer.php deleted file mode 100644 index f4d76cc15..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServer.php +++ /dev/null @@ -1,100 +0,0 @@ -processor_ = $processor; - $this->transport_ = $transport; - $this->inputTransportFactory_ = $inputTransportFactory; - $this->outputTransportFactory_ = $outputTransportFactory; - $this->inputProtocolFactory_ = $inputProtocolFactory; - $this->outputProtocolFactory_ = $outputProtocolFactory; - } - - /** - * Serves the server. 
This should never return - * unless a problem permits it to do so or it - * is interrupted intentionally - * - * @abstract - * @return void - */ - abstract public function serve(); - - /** - * Stops the server serving - * - * @abstract - * @return void - */ - abstract public function stop(); -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerSocket.php deleted file mode 100644 index da8e22683..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerSocket.php +++ /dev/null @@ -1,122 +0,0 @@ -host_ = $host; - $this->port_ = $port; - } - - /** - * Sets the accept timeout - * - * @param int $acceptTimeout - * @return void - */ - public function setAcceptTimeout($acceptTimeout) - { - $this->acceptTimeout_ = $acceptTimeout; - } - - /** - * Opens a new socket server handle - * - * @return void - */ - public function listen() - { - $this->listener_ = stream_socket_server('tcp://' . $this->host_ . ':' . $this->port_); - } - - /** - * Closes the socket server handle - * - * @return void - */ - public function close() - { - @fclose($this->listener_); - $this->listener_ = null; - } - - /** - * Implementation of accept. 
If not client is accepted in the given time - * - * @return TSocket - */ - protected function acceptImpl() - { - $handle = @stream_socket_accept($this->listener_, $this->acceptTimeout_ / 1000.0); - if(!$handle) return null; - - $socket = new TSocket(); - $socket->setHandle($handle); - - return $socket; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerTransport.php deleted file mode 100644 index f82d06d1d..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TServerTransport.php +++ /dev/null @@ -1,56 +0,0 @@ -acceptImpl(); - - if ($transport == null) { - throw new TTransportException("accept() may not return NULL"); - } - - return $transport; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSimpleServer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSimpleServer.php deleted file mode 100644 index e277700e8..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Server/TSimpleServer.php +++ /dev/null @@ -1,58 +0,0 @@ -transport_->listen(); - - while (!$this->stop_) { - try { - $transport = $this->transport_->accept(); - - if ($transport != null) { - $inputTransport = $this->inputTransportFactory_->getTransport($transport); - $outputTransport = $this->outputTransportFactory_->getTransport($transport); - $inputProtocol = $this->inputProtocolFactory_->getProtocol($inputTransport); - $outputProtocol = $this->outputProtocolFactory_->getProtocol($outputTransport); - while ($this->processor_->process($inputProtocol, $outputProtocol)) { } - } - } catch (TTransportException $e) { } - } - } - - /** - * Stops the server running. 
Kills the transport - * and then stops the main serving loop - * - * @return void - */ - public function stop() - { - $this->transport_->close(); - $this->stop_ = true; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TBufferedTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TBufferedTransport.php deleted file mode 100644 index f654ad3ee..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TBufferedTransport.php +++ /dev/null @@ -1,181 +0,0 @@ -transport_ = $transport; - $this->rBufSize_ = $rBufSize; - $this->wBufSize_ = $wBufSize; - } - - /** - * The underlying transport - * - * @var TTransport - */ - protected $transport_ = null; - - /** - * The receive buffer size - * - * @var int - */ - protected $rBufSize_ = 512; - - /** - * The write buffer size - * - * @var int - */ - protected $wBufSize_ = 512; - - /** - * The write buffer. - * - * @var string - */ - protected $wBuf_ = ''; - - /** - * The read buffer. - * - * @var string - */ - protected $rBuf_ = ''; - - public function isOpen() - { - return $this->transport_->isOpen(); - } - - public function open() - { - $this->transport_->open(); - } - - public function close() - { - $this->transport_->close(); - } - - public function putBack($data) - { - if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { - $this->rBuf_ = $data; - } else { - $this->rBuf_ = ($data . $this->rBuf_); - } - } - - /** - * The reason that we customize readAll here is that the majority of PHP - * streams are already internally buffered by PHP. The socket stream, for - * example, buffers internally and blocks if you call read with $len greater - * than the amount of data available, unlike recv() in C. - * - * Therefore, use the readAll method of the wrapped transport inside - * the buffered readAll. 
- */ - public function readAll($len) - { - $have = TStringFuncFactory::create()->strlen($this->rBuf_); - if ($have == 0) { - $data = $this->transport_->readAll($len); - } elseif ($have < $len) { - $data = $this->rBuf_; - $this->rBuf_ = ''; - $data .= $this->transport_->readAll($len - $have); - } elseif ($have == $len) { - $data = $this->rBuf_; - $this->rBuf_ = ''; - } elseif ($have > $len) { - $data = TStringFuncFactory::create()->substr($this->rBuf_, 0, $len); - $this->rBuf_ = TStringFuncFactory::create()->substr($this->rBuf_, $len); - } - - return $data; - } - - public function read($len) - { - if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { - $this->rBuf_ = $this->transport_->read($this->rBufSize_); - } - - if (TStringFuncFactory::create()->strlen($this->rBuf_) <= $len) { - $ret = $this->rBuf_; - $this->rBuf_ = ''; - - return $ret; - } - - $ret = TStringFuncFactory::create()->substr($this->rBuf_, 0, $len); - $this->rBuf_ = TStringFuncFactory::create()->substr($this->rBuf_, $len); - - return $ret; - } - - public function write($buf) - { - $this->wBuf_ .= $buf; - if (TStringFuncFactory::create()->strlen($this->wBuf_) >= $this->wBufSize_) { - $out = $this->wBuf_; - - // Note that we clear the internal wBuf_ prior to the underlying write - // to ensure we're in a sane state (i.e. internal buffer cleaned) - // if the underlying write throws up an exception - $this->wBuf_ = ''; - $this->transport_->write($out); - } - } - - public function flush() - { - if (TStringFuncFactory::create()->strlen($this->wBuf_) > 0) { - $out = $this->wBuf_; - - // Note that we clear the internal wBuf_ prior to the underlying write - // to ensure we're in a sane state (i.e. 
internal buffer cleaned) - // if the underlying write throws up an exception - $this->wBuf_ = ''; - $this->transport_->write($out); - } - $this->transport_->flush(); - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TCurlClient.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TCurlClient.php deleted file mode 100644 index c761cd025..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TCurlClient.php +++ /dev/null @@ -1,249 +0,0 @@ -strlen($uri) > 0) && ($uri{0} != '/')) { - $uri = '/'.$uri; - } - $this->scheme_ = $scheme; - $this->host_ = $host; - $this->port_ = $port; - $this->uri_ = $uri; - $this->request_ = ''; - $this->response_ = null; - $this->timeout_ = null; - $this->headers_ = array(); - } - - /** - * Set read timeout - * - * @param float $timeout - */ - public function setTimeoutSecs($timeout) - { - $this->timeout_ = $timeout; - } - - /** - * Whether this transport is open. - * - * @return boolean true if open - */ - public function isOpen() - { - return true; - } - - /** - * Open the transport for reading/writing - * - * @throws TTransportException if cannot open - */ - public function open() - { - } - - /** - * Close the transport. - */ - public function close() - { - $this->request_ = ''; - $this->response_ = null; - } - - /** - * Read some data into the array. 
- * - * @param int $len How much to read - * @return string The data that has been read - * @throws TTransportException if cannot read any more data - */ - public function read($len) - { - if ($len >= strlen($this->response_)) { - return $this->response_; - } else { - $ret = substr($this->response_, 0, $len); - $this->response_ = substr($this->response_, $len); - - return $ret; - } - } - - /** - * Writes some data into the pending buffer - * - * @param string $buf The data to write - * @throws TTransportException if writing fails - */ - public function write($buf) - { - $this->request_ .= $buf; - } - - /** - * Opens and sends the actual request over the HTTP connection - * - * @throws TTransportException if a writing error occurs - */ - public function flush() - { - if (!self::$curlHandle) { - register_shutdown_function(array('Thrift\\Transport\\TCurlClient', 'closeCurlHandle')); - self::$curlHandle = curl_init(); - curl_setopt(self::$curlHandle, CURLOPT_RETURNTRANSFER, true); - curl_setopt(self::$curlHandle, CURLOPT_BINARYTRANSFER, true); - curl_setopt(self::$curlHandle, CURLOPT_USERAGENT, 'PHP/TCurlClient'); - curl_setopt(self::$curlHandle, CURLOPT_CUSTOMREQUEST, 'POST'); - curl_setopt(self::$curlHandle, CURLOPT_FOLLOWLOCATION, true); - curl_setopt(self::$curlHandle, CURLOPT_MAXREDIRS, 1); - } - // God, PHP really has some esoteric ways of doing simple things. - $host = $this->host_.($this->port_ != 80 ? 
':'.$this->port_ : ''); - $fullUrl = $this->scheme_."://".$host.$this->uri_; - - $headers = array(); - $defaultHeaders = array('Accept' => 'application/x-thrift', - 'Content-Type' => 'application/x-thrift', - 'Content-Length' => TStringFuncFactory::create()->strlen($this->request_)); - foreach (array_merge($defaultHeaders, $this->headers_) as $key => $value) { - $headers[] = "$key: $value"; - } - - curl_setopt(self::$curlHandle, CURLOPT_HTTPHEADER, $headers); - - if ($this->timeout_ > 0) { - curl_setopt(self::$curlHandle, CURLOPT_TIMEOUT, $this->timeout_); - } - curl_setopt(self::$curlHandle, CURLOPT_POSTFIELDS, $this->request_); - $this->request_ = ''; - - curl_setopt(self::$curlHandle, CURLOPT_URL, $fullUrl); - $this->response_ = curl_exec(self::$curlHandle); - - // Connect failed? - if (!$this->response_) { - curl_close(self::$curlHandle); - self::$curlHandle = null; - $error = 'TCurlClient: Could not connect to '.$fullUrl; - throw new TTransportException($error, TTransportException::NOT_OPEN); - } - } - - public static function closeCurlHandle() - { - try { - if (self::$curlHandle) { - curl_close(self::$curlHandle); - self::$curlHandle = null; - } - } catch (\Exception $x) { - error_log('There was an error closing the curl handle: ' . 
$x->getMessage()); - } - } - - public function addHeaders($headers) - { - $this->headers_ = array_merge($this->headers_, $headers); - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TFramedTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TFramedTransport.php deleted file mode 100644 index b8a64a9a9..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TFramedTransport.php +++ /dev/null @@ -1,193 +0,0 @@ -transport_ = $transport; - $this->read_ = $read; - $this->write_ = $write; - } - - public function isOpen() - { - return $this->transport_->isOpen(); - } - - public function open() - { - $this->transport_->open(); - } - - public function close() - { - $this->transport_->close(); - } - - /** - * Reads from the buffer. When more data is required reads another entire - * chunk and serves future reads out of that. - * - * @param int $len How much data - */ - public function read($len) - { - if (!$this->read_) { - return $this->transport_->read($len); - } - - if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { - $this->readFrame(); - } - - // Just return full buff - if ($len >= TStringFuncFactory::create()->strlen($this->rBuf_)) { - $out = $this->rBuf_; - $this->rBuf_ = null; - - return $out; - } - - // Return TStringFuncFactory::create()->substr - $out = TStringFuncFactory::create()->substr($this->rBuf_, 0, $len); - $this->rBuf_ = TStringFuncFactory::create()->substr($this->rBuf_, $len); - - return $out; - } - - /** - * Put previously read data back into the buffer - * - * @param string $data data to return - */ - public function putBack($data) - { - if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { - $this->rBuf_ = $data; - } else { - $this->rBuf_ = ($data . $this->rBuf_); - } - } - - /** - * Reads a chunk of data into the internal read buffer. 
- */ - private function readFrame() - { - $buf = $this->transport_->readAll(4); - $val = unpack('N', $buf); - $sz = $val[1]; - - $this->rBuf_ = $this->transport_->readAll($sz); - } - - /** - * Writes some data to the pending output buffer. - * - * @param string $buf The data - * @param int $len Limit of bytes to write - */ - public function write($buf, $len=null) - { - if (!$this->write_) { - return $this->transport_->write($buf, $len); - } - - if ($len !== null && $len < TStringFuncFactory::create()->strlen($buf)) { - $buf = TStringFuncFactory::create()->substr($buf, 0, $len); - } - $this->wBuf_ .= $buf; - } - - /** - * Writes the output buffer to the stream in the format of a 4-byte length - * followed by the actual data. - */ - public function flush() - { - if (!$this->write_ || TStringFuncFactory::create()->strlen($this->wBuf_) == 0) { - return $this->transport_->flush(); - } - - $out = pack('N', TStringFuncFactory::create()->strlen($this->wBuf_)); - $out .= $this->wBuf_; - - // Note that we clear the internal wBuf_ prior to the underlying write - // to ensure we're in a sane state (i.e. 
internal buffer cleaned) - // if the underlying write throws up an exception - $this->wBuf_ = ''; - $this->transport_->write($out); - $this->transport_->flush(); - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/THttpClient.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/THttpClient.php deleted file mode 100644 index b372ab74a..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/THttpClient.php +++ /dev/null @@ -1,229 +0,0 @@ -strlen($uri) > 0) && ($uri{0} != '/')) { - $uri = '/'.$uri; - } - $this->scheme_ = $scheme; - $this->host_ = $host; - $this->port_ = $port; - $this->uri_ = $uri; - $this->buf_ = ''; - $this->handle_ = null; - $this->timeout_ = null; - $this->headers_ = array(); - } - - /** - * Set read timeout - * - * @param float $timeout - */ - public function setTimeoutSecs($timeout) - { - $this->timeout_ = $timeout; - } - - /** - * Whether this transport is open. - * - * @return boolean true if open - */ - public function isOpen() - { - return true; - } - - /** - * Open the transport for reading/writing - * - * @throws TTransportException if cannot open - */ - public function open() {} - - /** - * Close the transport. - */ - public function close() - { - if ($this->handle_) { - @fclose($this->handle_); - $this->handle_ = null; - } - } - - /** - * Read some data into the array. - * - * @param int $len How much to read - * @return string The data that has been read - * @throws TTransportException if cannot read any more data - */ - public function read($len) - { - $data = @fread($this->handle_, $len); - if ($data === FALSE || $data === '') { - $md = stream_get_meta_data($this->handle_); - if ($md['timed_out']) { - throw new TTransportException('THttpClient: timed out reading '.$len.' bytes from '.$this->host_.':'.$this->port_.$this->uri_, TTransportException::TIMED_OUT); - } else { - throw new TTransportException('THttpClient: Could not read '.$len.' 
bytes from '.$this->host_.':'.$this->port_.$this->uri_, TTransportException::UNKNOWN); - } - } - - return $data; - } - - /** - * Writes some data into the pending buffer - * - * @param string $buf The data to write - * @throws TTransportException if writing fails - */ - public function write($buf) - { - $this->buf_ .= $buf; - } - - /** - * Opens and sends the actual request over the HTTP connection - * - * @throws TTransportException if a writing error occurs - */ - public function flush() - { - // God, PHP really has some esoteric ways of doing simple things. - $host = $this->host_.($this->port_ != 80 ? ':'.$this->port_ : ''); - - $headers = array(); - $defaultHeaders = array('Host' => $host, - 'Accept' => 'application/x-thrift', - 'User-Agent' => 'PHP/THttpClient', - 'Content-Type' => 'application/x-thrift', - 'Content-Length' => TStringFuncFactory::create()->strlen($this->buf_)); - foreach (array_merge($defaultHeaders, $this->headers_) as $key => $value) { - $headers[] = "$key: $value"; - } - - $options = array('method' => 'POST', - 'header' => implode("\r\n", $headers), - 'max_redirects' => 1, - 'content' => $this->buf_); - if ($this->timeout_ > 0) { - $options['timeout'] = $this->timeout_; - } - $this->buf_ = ''; - - $contextid = stream_context_create(array('http' => $options)); - $this->handle_ = @fopen($this->scheme_.'://'.$host.$this->uri_, 'r', false, $contextid); - - // Connect failed? 
- if ($this->handle_ === FALSE) { - $this->handle_ = null; - $error = 'THttpClient: Could not connect to '.$host.$this->uri_; - throw new TTransportException($error, TTransportException::NOT_OPEN); - } - } - - public function addHeaders($headers) - { - $this->headers_ = array_merge($this->headers_, $headers); - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TMemoryBuffer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TMemoryBuffer.php deleted file mode 100644 index ca31c579f..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TMemoryBuffer.php +++ /dev/null @@ -1,100 +0,0 @@ -buf_ = $buf; - } - - protected $buf_ = ''; - - public function isOpen() - { - return true; - } - - public function open() {} - - public function close() {} - - public function write($buf) - { - $this->buf_ .= $buf; - } - - public function read($len) - { - $bufLength = TStringFuncFactory::create()->strlen($this->buf_); - - if ($bufLength === 0) { - throw new TTransportException('TMemoryBuffer: Could not read ' . - $len . 
' bytes from buffer.', - TTransportException::UNKNOWN); - } - - if ($bufLength <= $len) { - $ret = $this->buf_; - $this->buf_ = ''; - - return $ret; - } - - $ret = TStringFuncFactory::create()->substr($this->buf_, 0, $len); - $this->buf_ = TStringFuncFactory::create()->substr($this->buf_, $len); - - return $ret; - } - - public function getBuffer() - { - return $this->buf_; - } - - public function available() - { - return TStringFuncFactory::create()->strlen($this->buf_); - } - - public function putBack($data) - { - $this->buf_ = $data.$this->buf_; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TPhpStream.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TPhpStream.php deleted file mode 100644 index 4c14cdfe1..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TPhpStream.php +++ /dev/null @@ -1,123 +0,0 @@ -read_ = $mode & self::MODE_R; - $this->write_ = $mode & self::MODE_W; - } - - public function open() - { - if ($this->read_) { - $this->inStream_ = @fopen(self::inStreamName(), 'r'); - if (!is_resource($this->inStream_)) { - throw new TException('TPhpStream: Could not open php://input'); - } - } - if ($this->write_) { - $this->outStream_ = @fopen('php://output', 'w'); - if (!is_resource($this->outStream_)) { - throw new TException('TPhpStream: Could not open php://output'); - } - } - } - - public function close() - { - if ($this->read_) { - @fclose($this->inStream_); - $this->inStream_ = null; - } - if ($this->write_) { - @fclose($this->outStream_); - $this->outStream_ = null; - } - } - - public function isOpen() - { - return - (!$this->read_ || is_resource($this->inStream_)) && - (!$this->write_ || is_resource($this->outStream_)); - } - - public function read($len) - { - $data = @fread($this->inStream_, $len); - if ($data === FALSE || $data === '') { - throw new TException('TPhpStream: Could not read '.$len.' 
bytes'); - } - - return $data; - } - - public function write($buf) - { - while (TStringFuncFactory::create()->strlen($buf) > 0) { - $got = @fwrite($this->outStream_, $buf); - if ($got === 0 || $got === FALSE) { - throw new TException('TPhpStream: Could not write '.TStringFuncFactory::create()->strlen($buf).' bytes'); - } - $buf = TStringFuncFactory::create()->substr($buf, $got); - } - } - - public function flush() - { - @fflush($this->outStream_); - } - - private static function inStreamName() - { - if (php_sapi_name() == 'cli') { - return 'php://stdin'; - } - - return 'php://input'; - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSSLSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSSLSocket.php deleted file mode 100644 index 533b7bbfe..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSSLSocket.php +++ /dev/null @@ -1,112 +0,0 @@ -host_ = $this->getSSLHost($host); - $this->port_ = $port; - $this->context_ = $context; - $this->debugHandler_ = $debugHandler ? $debugHandler : 'error_log'; - } - - /** - * Creates a host name with SSL transport protocol - * if no transport protocol already specified in - * the host name. - * - * @param string $host Host to listen on - * @return string $host Host name with transport protocol - */ - private function getSSLHost($host) - { - $transport_protocol_loc = strpos($host, "://"); - if ($transport_protocol_loc === false) { - $host = 'ssl://'.$host; - } - return $host; - } - - /** - * Connects the socket. 
- */ - public function open() - { - if ($this->isOpen()) { - throw new TTransportException('Socket already connected', TTransportException::ALREADY_OPEN); - } - - if (empty($this->host_)) { - throw new TTransportException('Cannot open null host', TTransportException::NOT_OPEN); - } - - if ($this->port_ <= 0) { - throw new TTransportException('Cannot open without port', TTransportException::NOT_OPEN); - } - - $this->handle_ = @stream_socket_client($this->host_.':'.$this->port_, - $errno, - $errstr, - $this->sendTimeoutSec_ + ($this->sendTimeoutUsec_ / 1000000), - STREAM_CLIENT_CONNECT, - $this->context_); - - // Connect failed? - if ($this->handle_ === FALSE) { - $error = 'TSocket: Could not connect to '.$this->host_.':'.$this->port_.' ('.$errstr.' ['.$errno.'])'; - if ($this->debug_) { - call_user_func($this->debugHandler_, $error); - } - throw new TException($error); - } - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocket.php deleted file mode 100644 index a1872b98b..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocket.php +++ /dev/null @@ -1,340 +0,0 @@ -host_ = $host; - $this->port_ = $port; - $this->persist_ = $persist; - $this->debugHandler_ = $debugHandler ? $debugHandler : 'error_log'; - } - - /** - * @param resource $handle - * @return void - */ - public function setHandle($handle) - { - $this->handle_ = $handle; - } - - /** - * Sets the send timeout. - * - * @param int $timeout Timeout in milliseconds. - */ - public function setSendTimeout($timeout) - { - $this->sendTimeoutSec_ = floor($timeout / 1000); - $this->sendTimeoutUsec_ = - ($timeout - ($this->sendTimeoutSec_ * 1000)) * 1000; - } - - /** - * Sets the receive timeout. - * - * @param int $timeout Timeout in milliseconds. 
- */ - public function setRecvTimeout($timeout) - { - $this->recvTimeoutSec_ = floor($timeout / 1000); - $this->recvTimeoutUsec_ = - ($timeout - ($this->recvTimeoutSec_ * 1000)) * 1000; - } - - /** - * Sets debugging output on or off - * - * @param bool $debug - */ - public function setDebug($debug) - { - $this->debug_ = $debug; - } - - /** - * Get the host that this socket is connected to - * - * @return string host - */ - public function getHost() - { - return $this->host_; - } - - /** - * Get the remote port that this socket is connected to - * - * @return int port - */ - public function getPort() - { - return $this->port_; - } - - /** - * Tests whether this is open - * - * @return bool true if the socket is open - */ - public function isOpen() - { - return is_resource($this->handle_); - } - - /** - * Connects the socket. - */ - public function open() - { - if ($this->isOpen()) { - throw new TTransportException('Socket already connected', TTransportException::ALREADY_OPEN); - } - - if (empty($this->host_)) { - throw new TTransportException('Cannot open null host', TTransportException::NOT_OPEN); - } - - if ($this->port_ <= 0) { - throw new TTransportException('Cannot open without port', TTransportException::NOT_OPEN); - } - - if ($this->persist_) { - $this->handle_ = @pfsockopen($this->host_, - $this->port_, - $errno, - $errstr, - $this->sendTimeoutSec_ + ($this->sendTimeoutUsec_ / 1000000)); - } else { - $this->handle_ = @fsockopen($this->host_, - $this->port_, - $errno, - $errstr, - $this->sendTimeoutSec_ + ($this->sendTimeoutUsec_ / 1000000)); - } - - // Connect failed? - if ($this->handle_ === FALSE) { - $error = 'TSocket: Could not connect to '.$this->host_.':'.$this->port_.' ('.$errstr.' 
['.$errno.'])'; - if ($this->debug_) { - call_user_func($this->debugHandler_, $error); - } - throw new TException($error); - } - - if (function_exists('socket_import_stream') && function_exists('socket_set_option')) { - $socket = socket_import_stream($this->handle_); - socket_set_option($socket, SOL_TCP, TCP_NODELAY, 1); - } - } - - /** - * Closes the socket. - */ - public function close() - { - @fclose($this->handle_); - $this->handle_ = null; - } - - /** - * Read from the socket at most $len bytes. - * - * This method will not wait for all the requested data, it will return as - * soon as any data is received. - * - * @param int $len Maximum number of bytes to read. - * @return string Binary data - */ - public function read($len) - { - $null = null; - $read = array($this->handle_); - $readable = @stream_select($read, $null, $null, $this->recvTimeoutSec_, $this->recvTimeoutUsec_); - - if ($readable > 0) { - $data = fread($this->handle_, $len); - if ($data === false) { - throw new TTransportException('TSocket: Could not read '.$len.' bytes from '. - $this->host_.':'.$this->port_); - } elseif ($data == '' && feof($this->handle_)) { - throw new TTransportException('TSocket read 0 bytes'); - } - - return $data; - } elseif ($readable === 0) { - throw new TTransportException('TSocket: timed out reading '.$len.' bytes from '. - $this->host_.':'.$this->port_); - } else { - throw new TTransportException('TSocket: Could not read '.$len.' bytes from '. - $this->host_.':'.$this->port_); - } - } - - /** - * Write to the socket. 
- * - * @param string $buf The data to write - */ - public function write($buf) - { - $null = null; - $write = array($this->handle_); - - // keep writing until all the data has been written - while (TStringFuncFactory::create()->strlen($buf) > 0) { - // wait for stream to become available for writing - $writable = @stream_select($null, $write, $null, $this->sendTimeoutSec_, $this->sendTimeoutUsec_); - if ($writable > 0) { - // write buffer to stream - $written = fwrite($this->handle_, $buf); - if ($written === -1 || $written === false) { - throw new TTransportException('TSocket: Could not write '.TStringFuncFactory::create()->strlen($buf).' bytes '. - $this->host_.':'.$this->port_); - } - // determine how much of the buffer is left to write - $buf = TStringFuncFactory::create()->substr($buf, $written); - } elseif ($writable === 0) { - throw new TTransportException('TSocket: timed out writing '.TStringFuncFactory::create()->strlen($buf).' bytes from '. - $this->host_.':'.$this->port_); - } else { - throw new TTransportException('TSocket: Could not write '.TStringFuncFactory::create()->strlen($buf).' bytes '. - $this->host_.':'.$this->port_); - } - } - } - - /** - * Flush output to the socket. - * - * Since read(), readAll() and write() operate on the sockets directly, - * this is a no-op - * - * If you wish to have flushable buffering behaviour, wrap this TSocket - * in a TBufferedTransport. 
- */ - public function flush() - { - // no-op - } - } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocketPool.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocketPool.php deleted file mode 100644 index 18ffd8d94..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TSocketPool.php +++ /dev/null @@ -1,300 +0,0 @@ - $val) { - $ports[$key] = $port; - } - } - - foreach ($hosts as $key => $host) { - $this->servers_ []= array('host' => $host, - 'port' => $ports[$key]); - } - } - - /** - * Add a server to the pool - * - * This function does not prevent you from adding a duplicate server entry. - * - * @param string $host hostname or IP - * @param int $port port - */ - public function addServer($host, $port) - { - $this->servers_[] = array('host' => $host, 'port' => $port); - } - - /** - * Sets how many time to keep retrying a host in the connect function. - * - * @param int $numRetries - */ - public function setNumRetries($numRetries) - { - $this->numRetries_ = $numRetries; - } - - /** - * Sets how long to wait until retrying a host if it was marked down - * - * @param int $numRetries - */ - public function setRetryInterval($retryInterval) - { - $this->retryInterval_ = $retryInterval; - } - - /** - * Sets how many time to keep retrying a host before marking it as down. - * - * @param int $numRetries - */ - public function setMaxConsecutiveFailures($maxConsecutiveFailures) - { - $this->maxConsecutiveFailures_ = $maxConsecutiveFailures; - } - - /** - * Turns randomization in connect order on or off. - * - * @param bool $randomize - */ - public function setRandomize($randomize) - { - $this->randomize_ = $randomize; - } - - /** - * Whether to always try the last server. 
- * - * @param bool $alwaysTryLast - */ - public function setAlwaysTryLast($alwaysTryLast) - { - $this->alwaysTryLast_ = $alwaysTryLast; - } - - /** - * Connects the socket by iterating through all the servers in the pool - * and trying to find one that works. - */ - public function open() - { - // Check if we want order randomization - if ($this->randomize_) { - shuffle($this->servers_); - } - - // Count servers to identify the "last" one - $numServers = count($this->servers_); - - for ($i = 0; $i < $numServers; ++$i) { - - // This extracts the $host and $port variables - extract($this->servers_[$i]); - - // Check APC cache for a record of this server being down - $failtimeKey = 'thrift_failtime:'.$host.':'.$port.'~'; - - // Cache miss? Assume it's OK - $lastFailtime = apc_fetch($failtimeKey); - if ($lastFailtime === FALSE) { - $lastFailtime = 0; - } - - $retryIntervalPassed = false; - - // Cache hit...make sure enough the retry interval has elapsed - if ($lastFailtime > 0) { - $elapsed = time() - $lastFailtime; - if ($elapsed > $this->retryInterval_) { - $retryIntervalPassed = true; - if ($this->debug_) { - call_user_func($this->debugHandler_, - 'TSocketPool: retryInterval '. - '('.$this->retryInterval_.') '. 
- 'has passed for host '.$host.':'.$port); - } - } - } - - // Only connect if not in the middle of a fail interval, OR if this - // is the LAST server we are trying, just hammer away on it - $isLastServer = false; - if ($this->alwaysTryLast_) { - $isLastServer = ($i == ($numServers - 1)); - } - - if (($lastFailtime === 0) || - ($isLastServer) || - ($lastFailtime > 0 && $retryIntervalPassed)) { - - // Set underlying TSocket params to this one - $this->host_ = $host; - $this->port_ = $port; - - // Try up to numRetries_ connections per server - for ($attempt = 0; $attempt < $this->numRetries_; $attempt++) { - try { - // Use the underlying TSocket open function - parent::open(); - - // Only clear the failure counts if required to do so - if ($lastFailtime > 0) { - apc_store($failtimeKey, 0); - } - - // Successful connection, return now - return; - - } catch (TException $tx) { - // Connection failed - } - } - - // Mark failure of this host in the cache - $consecfailsKey = 'thrift_consecfails:'.$host.':'.$port.'~'; - - // Ignore cache misses - $consecfails = apc_fetch($consecfailsKey); - if ($consecfails === FALSE) { - $consecfails = 0; - } - - // Increment by one - $consecfails++; - - // Log and cache this failure - if ($consecfails >= $this->maxConsecutiveFailures_) { - if ($this->debug_) { - call_user_func($this->debugHandler_, - 'TSocketPool: marking '.$host.':'.$port. - ' as down for '.$this->retryInterval_.' secs '. - 'after '.$consecfails.' failed attempts.'); - } - // Store the failure time - apc_store($failtimeKey, time()); - - // Clear the count of consecutive failures - apc_store($consecfailsKey, 0); - } else { - apc_store($consecfailsKey, $consecfails); - } - } - } - - // Oh no; we failed them all. The system is totally ill! - $error = 'TSocketPool: All hosts in pool are down. 
'; - $hosts = array(); - foreach ($this->servers_ as $server) { - $hosts []= $server['host'].':'.$server['port']; - } - $hostlist = implode(',', $hosts); - $error .= '('.$hostlist.')'; - if ($this->debug_) { - call_user_func($this->debugHandler_, $error); - } - throw new TException($error); - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TTransport.php deleted file mode 100644 index 99c39ff37..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TTransport.php +++ /dev/null @@ -1,95 +0,0 @@ -read($len); - - $data = ''; - $got = 0; - while (($got = TStringFuncFactory::create()->strlen($data)) < $len) { - $data .= $this->read($len - $got); - } - - return $data; - } - - /** - * Writes the given data out. - * - * @param string $buf The data to write - * @throws TTransportException if writing fails - */ - abstract public function write($buf); - - /** - * Flushes any pending data out of a buffer - * - * @throws TTransportException if a writing error occurs - */ - public function flush() {} -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TBufferedTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TBufferedTransport.php new file mode 100644 index 000000000..253c5acfb --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TBufferedTransport.php @@ -0,0 +1,206 @@ +transport_ = $transport; + $this->rBufSize_ = $rBufSize; + $this->wBufSize_ = $wBufSize; + } + + public function isOpen() + { + return $this->transport_->isOpen(); + } + + /** + * @inheritdoc + * + * @throws TTransportException + */ + public function open() + { + $this->transport_->open(); + } + + public function close() + { + $this->transport_->close(); + } + + public function putBack($data) + { + if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { + $this->rBuf_ = $data; + } else { + $this->rBuf_ = 
($data . $this->rBuf_); + } + } + + /** + * The reason that we customize readAll here is that the majority of PHP + * streams are already internally buffered by PHP. The socket stream, for + * example, buffers internally and blocks if you call read with $len greater + * than the amount of data available, unlike recv() in C. + * + * Therefore, use the readAll method of the wrapped transport inside + * the buffered readAll. + * + * @throws TTransportException + */ + public function readAll($len) + { + $have = TStringFuncFactory::create()->strlen($this->rBuf_); + if ($have == 0) { + $data = $this->transport_->readAll($len); + } elseif ($have < $len) { + $data = $this->rBuf_; + $this->rBuf_ = ''; + $data .= $this->transport_->readAll($len - $have); + } elseif ($have == $len) { + $data = $this->rBuf_; + $this->rBuf_ = ''; + } elseif ($have > $len) { + $data = TStringFuncFactory::create()->substr($this->rBuf_, 0, $len); + $this->rBuf_ = TStringFuncFactory::create()->substr($this->rBuf_, $len); + } + + return $data; + } + + /** + * @inheritdoc + * + * @param int $len + * @return string + * @throws TTransportException + */ + public function read($len) + { + if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { + $this->rBuf_ = $this->transport_->read($this->rBufSize_); + } + + if (TStringFuncFactory::create()->strlen($this->rBuf_) <= $len) { + $ret = $this->rBuf_; + $this->rBuf_ = ''; + + return $ret; + } + + $ret = TStringFuncFactory::create()->substr($this->rBuf_, 0, $len); + $this->rBuf_ = TStringFuncFactory::create()->substr($this->rBuf_, $len); + + return $ret; + } + + /** + * @inheritdoc + * + * @param string $buf + * @throws TTransportException + */ + public function write($buf) + { + $this->wBuf_ .= $buf; + if (TStringFuncFactory::create()->strlen($this->wBuf_) >= $this->wBufSize_) { + $out = $this->wBuf_; + + // Note that we clear the internal wBuf_ prior to the underlying write + // to ensure we're in a sane state (i.e. 
internal buffer cleaned) + // if the underlying write throws up an exception + $this->wBuf_ = ''; + $this->transport_->write($out); + } + } + + /** + * @inheritdoc + * + * @throws TTransportException + */ + public function flush() + { + if (TStringFuncFactory::create()->strlen($this->wBuf_) > 0) { + $out = $this->wBuf_; + + // Note that we clear the internal wBuf_ prior to the underlying write + // to ensure we're in a sane state (i.e. internal buffer cleaned) + // if the underlying write throws up an exception + $this->wBuf_ = ''; + $this->transport_->write($out); + } + $this->transport_->flush(); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TCurlClient.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TCurlClient.php new file mode 100644 index 000000000..f51fa88ea --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TCurlClient.php @@ -0,0 +1,248 @@ +strlen($uri) > 0) && ($uri{0} != '/')) { + $uri = '/' . $uri; + } + $this->scheme_ = $scheme; + $this->host_ = $host; + $this->port_ = $port; + $this->uri_ = $uri; + $this->request_ = ''; + $this->response_ = null; + $this->timeout_ = null; + $this->headers_ = array(); + } + + /** + * Set read timeout + * + * @param float $timeout + */ + public function setTimeoutSecs($timeout) + { + $this->timeout_ = $timeout; + } + + /** + * Whether this transport is open. + * + * @return boolean true if open + */ + public function isOpen() + { + return true; + } + + /** + * Open the transport for reading/writing + * + * @throws TTransportException if cannot open + */ + public function open() + { + } + + /** + * Close the transport. + */ + public function close() + { + $this->request_ = ''; + $this->response_ = null; + } + + /** + * Read some data into the array. 
+ * + * @param int $len How much to read + * @return string The data that has been read + * @throws TTransportException if cannot read any more data + */ + public function read($len) + { + if ($len >= strlen($this->response_)) { + return $this->response_; + } else { + $ret = substr($this->response_, 0, $len); + $this->response_ = substr($this->response_, $len); + + return $ret; + } + } + + /** + * Writes some data into the pending buffer + * + * @param string $buf The data to write + * @throws TTransportException if writing fails + */ + public function write($buf) + { + $this->request_ .= $buf; + } + + /** + * Opens and sends the actual request over the HTTP connection + * + * @throws TTransportException if a writing error occurs + */ + public function flush() + { + if (!self::$curlHandle) { + register_shutdown_function(array('Thrift\\Transport\\TCurlClient', 'closeCurlHandle')); + self::$curlHandle = curl_init(); + curl_setopt(self::$curlHandle, CURLOPT_RETURNTRANSFER, true); + curl_setopt(self::$curlHandle, CURLOPT_BINARYTRANSFER, true); + curl_setopt(self::$curlHandle, CURLOPT_USERAGENT, 'PHP/TCurlClient'); + curl_setopt(self::$curlHandle, CURLOPT_CUSTOMREQUEST, 'POST'); + curl_setopt(self::$curlHandle, CURLOPT_FOLLOWLOCATION, true); + curl_setopt(self::$curlHandle, CURLOPT_MAXREDIRS, 1); + } + // God, PHP really has some esoteric ways of doing simple things. + $host = $this->host_ . ($this->port_ != 80 ? ':' . $this->port_ : ''); + $fullUrl = $this->scheme_ . "://" . $host . 
$this->uri_; + + $headers = array(); + $defaultHeaders = array('Accept' => 'application/x-thrift', + 'Content-Type' => 'application/x-thrift', + 'Content-Length' => TStringFuncFactory::create()->strlen($this->request_)); + foreach (array_merge($defaultHeaders, $this->headers_) as $key => $value) { + $headers[] = "$key: $value"; + } + + curl_setopt(self::$curlHandle, CURLOPT_HTTPHEADER, $headers); + + if ($this->timeout_ > 0) { + curl_setopt(self::$curlHandle, CURLOPT_TIMEOUT, $this->timeout_); + } + curl_setopt(self::$curlHandle, CURLOPT_POSTFIELDS, $this->request_); + $this->request_ = ''; + + curl_setopt(self::$curlHandle, CURLOPT_URL, $fullUrl); + $this->response_ = curl_exec(self::$curlHandle); + + // Connect failed? + if (!$this->response_) { + curl_close(self::$curlHandle); + self::$curlHandle = null; + $error = 'TCurlClient: Could not connect to ' . $fullUrl; + throw new TTransportException($error, TTransportException::NOT_OPEN); + } + } + + public static function closeCurlHandle() + { + try { + if (self::$curlHandle) { + curl_close(self::$curlHandle); + self::$curlHandle = null; + } + } catch (\Exception $x) { + error_log('There was an error closing the curl handle: ' . $x->getMessage()); + } + } + + public function addHeaders($headers) + { + $this->headers_ = array_merge($this->headers_, $headers); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TFramedTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TFramedTransport.php new file mode 100644 index 000000000..39d186987 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TFramedTransport.php @@ -0,0 +1,192 @@ +transport_ = $transport; + $this->read_ = $read; + $this->write_ = $write; + } + + public function isOpen() + { + return $this->transport_->isOpen(); + } + + public function open() + { + $this->transport_->open(); + } + + public function close() + { + $this->transport_->close(); + } + + /** + * Reads from the buffer. 
When more data is required reads another entire + * chunk and serves future reads out of that. + * + * @param int $len How much data + */ + public function read($len) + { + if (!$this->read_) { + return $this->transport_->read($len); + } + + if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { + $this->readFrame(); + } + + // Just return full buff + if ($len >= TStringFuncFactory::create()->strlen($this->rBuf_)) { + $out = $this->rBuf_; + $this->rBuf_ = null; + + return $out; + } + + // Return TStringFuncFactory::create()->substr + $out = TStringFuncFactory::create()->substr($this->rBuf_, 0, $len); + $this->rBuf_ = TStringFuncFactory::create()->substr($this->rBuf_, $len); + + return $out; + } + + /** + * Put previously read data back into the buffer + * + * @param string $data data to return + */ + public function putBack($data) + { + if (TStringFuncFactory::create()->strlen($this->rBuf_) === 0) { + $this->rBuf_ = $data; + } else { + $this->rBuf_ = ($data . $this->rBuf_); + } + } + + /** + * Reads a chunk of data into the internal read buffer. + */ + private function readFrame() + { + $buf = $this->transport_->readAll(4); + $val = unpack('N', $buf); + $sz = $val[1]; + + $this->rBuf_ = $this->transport_->readAll($sz); + } + + /** + * Writes some data to the pending output buffer. + * + * @param string $buf The data + * @param int $len Limit of bytes to write + */ + public function write($buf, $len = null) + { + if (!$this->write_) { + return $this->transport_->write($buf, $len); + } + + if ($len !== null && $len < TStringFuncFactory::create()->strlen($buf)) { + $buf = TStringFuncFactory::create()->substr($buf, 0, $len); + } + $this->wBuf_ .= $buf; + } + + /** + * Writes the output buffer to the stream in the format of a 4-byte length + * followed by the actual data. 
+ */ + public function flush() + { + if (!$this->write_ || TStringFuncFactory::create()->strlen($this->wBuf_) == 0) { + return $this->transport_->flush(); + } + + $out = pack('N', TStringFuncFactory::create()->strlen($this->wBuf_)); + $out .= $this->wBuf_; + + // Note that we clear the internal wBuf_ prior to the underlying write + // to ensure we're in a sane state (i.e. internal buffer cleaned) + // if the underlying write throws up an exception + $this->wBuf_ = ''; + $this->transport_->write($out); + $this->transport_->flush(); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/THttpClient.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/THttpClient.php new file mode 100644 index 000000000..a89794b08 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/THttpClient.php @@ -0,0 +1,243 @@ +strlen($uri) > 0) && ($uri{0} != '/')) { + $uri = '/' . $uri; + } + $this->scheme_ = $scheme; + $this->host_ = $host; + $this->port_ = $port; + $this->uri_ = $uri; + $this->buf_ = ''; + $this->handle_ = null; + $this->timeout_ = null; + $this->headers_ = array(); + } + + /** + * Set read timeout + * + * @param float $timeout + */ + public function setTimeoutSecs($timeout) + { + $this->timeout_ = $timeout; + } + + /** + * Whether this transport is open. + * + * @return boolean true if open + */ + public function isOpen() + { + return true; + } + + /** + * Open the transport for reading/writing + * + * @throws TTransportException if cannot open + */ + public function open() + { + } + + /** + * Close the transport. + */ + public function close() + { + if ($this->handle_) { + @fclose($this->handle_); + $this->handle_ = null; + } + } + + /** + * Read some data into the array. 
+ * + * @param int $len How much to read + * @return string The data that has been read + * @throws TTransportException if cannot read any more data + */ + public function read($len) + { + $data = @fread($this->handle_, $len); + if ($data === false || $data === '') { + $md = stream_get_meta_data($this->handle_); + if ($md['timed_out']) { + throw new TTransportException( + 'THttpClient: timed out reading ' . $len . ' bytes from ' . + $this->host_ . ':' . $this->port_ . $this->uri_, + TTransportException::TIMED_OUT + ); + } else { + throw new TTransportException( + 'THttpClient: Could not read ' . $len . ' bytes from ' . + $this->host_ . ':' . $this->port_ . $this->uri_, + TTransportException::UNKNOWN + ); + } + } + + return $data; + } + + /** + * Writes some data into the pending buffer + * + * @param string $buf The data to write + * @throws TTransportException if writing fails + */ + public function write($buf) + { + $this->buf_ .= $buf; + } + + /** + * Opens and sends the actual request over the HTTP connection + * + * @throws TTransportException if a writing error occurs + */ + public function flush() + { + // God, PHP really has some esoteric ways of doing simple things. + $host = $this->host_ . ($this->port_ != 80 ? ':' . $this->port_ : ''); + + $headers = array(); + $defaultHeaders = array('Host' => $host, + 'Accept' => 'application/x-thrift', + 'User-Agent' => 'PHP/THttpClient', + 'Content-Type' => 'application/x-thrift', + 'Content-Length' => TStringFuncFactory::create()->strlen($this->buf_)); + foreach (array_merge($defaultHeaders, $this->headers_) as $key => $value) { + $headers[] = "$key: $value"; + } + + $options = array('method' => 'POST', + 'header' => implode("\r\n", $headers), + 'max_redirects' => 1, + 'content' => $this->buf_); + if ($this->timeout_ > 0) { + $options['timeout'] = $this->timeout_; + } + $this->buf_ = ''; + + $contextid = stream_context_create(array('http' => $options)); + $this->handle_ = @fopen( + $this->scheme_ . '://' . $host . 
$this->uri_, + 'r', + false, + $contextid + ); + + // Connect failed? + if ($this->handle_ === false) { + $this->handle_ = null; + $error = 'THttpClient: Could not connect to ' . $host . $this->uri_; + throw new TTransportException($error, TTransportException::NOT_OPEN); + } + } + + public function addHeaders($headers) + { + $this->headers_ = array_merge($this->headers_, $headers); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TMemoryBuffer.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TMemoryBuffer.php new file mode 100644 index 000000000..fee03a2a4 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TMemoryBuffer.php @@ -0,0 +1,106 @@ +buf_ = $buf; + } + + protected $buf_ = ''; + + public function isOpen() + { + return true; + } + + public function open() + { + } + + public function close() + { + } + + public function write($buf) + { + $this->buf_ .= $buf; + } + + public function read($len) + { + $bufLength = TStringFuncFactory::create()->strlen($this->buf_); + + if ($bufLength === 0) { + throw new TTransportException( + 'TMemoryBuffer: Could not read ' . + $len . ' bytes from buffer.', + TTransportException::UNKNOWN + ); + } + + if ($bufLength <= $len) { + $ret = $this->buf_; + $this->buf_ = ''; + + return $ret; + } + + $ret = TStringFuncFactory::create()->substr($this->buf_, 0, $len); + $this->buf_ = TStringFuncFactory::create()->substr($this->buf_, $len); + + return $ret; + } + + public function getBuffer() + { + return $this->buf_; + } + + public function available() + { + return TStringFuncFactory::create()->strlen($this->buf_); + } + + public function putBack($data) + { + $this->buf_ = $data . 
$this->buf_; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TNullTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TNullTransport.php similarity index 78% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TNullTransport.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TNullTransport.php index feeb7a468..7e086b67c 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Transport/TNullTransport.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TNullTransport.php @@ -32,20 +32,25 @@ use Thrift\Exception\TTransportException; */ class TNullTransport extends TTransport { - public function isOpen() - { - return true; - } + public function isOpen() + { + return true; + } - public function open() {} + public function open() + { + } - public function close() {} + public function close() + { + } - public function read($len) - { - throw new TTransportException("Can't read from TNullTransport."); - } - - public function write($buf) {} + public function read($len) + { + throw new TTransportException("Can't read from TNullTransport."); + } + public function write($buf) + { + } } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TPhpStream.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TPhpStream.php new file mode 100644 index 000000000..42823ff33 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TPhpStream.php @@ -0,0 +1,124 @@ +read_ = $mode & self::MODE_R; + $this->write_ = $mode & self::MODE_W; + } + + public function open() + { + if ($this->read_) { + $this->inStream_ = @fopen(self::inStreamName(), 'r'); + if (!is_resource($this->inStream_)) { + throw new TException('TPhpStream: Could not open php://input'); + } + } + if ($this->write_) { + $this->outStream_ = @fopen('php://output', 'w'); + if (!is_resource($this->outStream_)) { + throw new TException('TPhpStream: Could not open php://output'); + } 
+ } + } + + public function close() + { + if ($this->read_) { + @fclose($this->inStream_); + $this->inStream_ = null; + } + if ($this->write_) { + @fclose($this->outStream_); + $this->outStream_ = null; + } + } + + public function isOpen() + { + return + (!$this->read_ || is_resource($this->inStream_)) && + (!$this->write_ || is_resource($this->outStream_)); + } + + public function read($len) + { + $data = @fread($this->inStream_, $len); + if ($data === false || $data === '') { + throw new TException('TPhpStream: Could not read ' . $len . ' bytes'); + } + + return $data; + } + + public function write($buf) + { + while (TStringFuncFactory::create()->strlen($buf) > 0) { + $got = @fwrite($this->outStream_, $buf); + if ($got === 0 || $got === false) { + throw new TException( + 'TPhpStream: Could not write ' . TStringFuncFactory::create()->strlen($buf) . ' bytes' + ); + } + $buf = TStringFuncFactory::create()->substr($buf, $got); + } + } + + public function flush() + { + @fflush($this->outStream_); + } + + private static function inStreamName() + { + if (php_sapi_name() == 'cli') { + return 'php://stdin'; + } + + return 'php://input'; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSSLSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSSLSocket.php new file mode 100644 index 000000000..b4a0adb54 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSSLSocket.php @@ -0,0 +1,117 @@ +host_ = $this->getSSLHost($host); + $this->port_ = $port; + $this->context_ = $context; + $this->debugHandler_ = $debugHandler ? $debugHandler : 'error_log'; + } + + /** + * Creates a host name with SSL transport protocol + * if no transport protocol already specified in + * the host name. 
+ * + * @param string $host Host to listen on + * @return string $host Host name with transport protocol + */ + private function getSSLHost($host) + { + $transport_protocol_loc = strpos($host, "://"); + if ($transport_protocol_loc === false) { + $host = 'ssl://' . $host; + } + return $host; + } + + /** + * Connects the socket. + */ + public function open() + { + if ($this->isOpen()) { + throw new TTransportException('Socket already connected', TTransportException::ALREADY_OPEN); + } + + if (empty($this->host_)) { + throw new TTransportException('Cannot open null host', TTransportException::NOT_OPEN); + } + + if ($this->port_ <= 0) { + throw new TTransportException('Cannot open without port', TTransportException::NOT_OPEN); + } + + $this->handle_ = @stream_socket_client( + $this->host_ . ':' . $this->port_, + $errno, + $errstr, + $this->sendTimeoutSec_ + ($this->sendTimeoutUsec_ / 1000000), + STREAM_CLIENT_CONNECT, + $this->context_ + ); + + // Connect failed? + if ($this->handle_ === false) { + $error = 'TSocket: Could not connect to ' . + $this->host_ . ':' . $this->port_ . ' (' . $errstr . ' [' . $errno . '])'; + if ($this->debug_) { + call_user_func($this->debugHandler_, $error); + } + throw new TException($error); + } + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocket.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocket.php new file mode 100644 index 000000000..5147efa63 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocket.php @@ -0,0 +1,366 @@ +host_ = $host; + $this->port_ = $port; + $this->persist_ = $persist; + $this->debugHandler_ = $debugHandler ? $debugHandler : 'error_log'; + } + + /** + * @param resource $handle + * @return void + */ + public function setHandle($handle) + { + $this->handle_ = $handle; + stream_set_blocking($this->handle_, false); + } + + /** + * Sets the send timeout. + * + * @param int $timeout Timeout in milliseconds. 
+ */ + public function setSendTimeout($timeout) + { + $this->sendTimeoutSec_ = floor($timeout / 1000); + $this->sendTimeoutUsec_ = + ($timeout - ($this->sendTimeoutSec_ * 1000)) * 1000; + } + + /** + * Sets the receive timeout. + * + * @param int $timeout Timeout in milliseconds. + */ + public function setRecvTimeout($timeout) + { + $this->recvTimeoutSec_ = floor($timeout / 1000); + $this->recvTimeoutUsec_ = + ($timeout - ($this->recvTimeoutSec_ * 1000)) * 1000; + } + + /** + * Sets debugging output on or off + * + * @param bool $debug + */ + public function setDebug($debug) + { + $this->debug_ = $debug; + } + + /** + * Get the host that this socket is connected to + * + * @return string host + */ + public function getHost() + { + return $this->host_; + } + + /** + * Get the remote port that this socket is connected to + * + * @return int port + */ + public function getPort() + { + return $this->port_; + } + + /** + * Tests whether this is open + * + * @return bool true if the socket is open + */ + public function isOpen() + { + return is_resource($this->handle_); + } + + /** + * Connects the socket. + */ + public function open() + { + if ($this->isOpen()) { + throw new TTransportException('Socket already connected', TTransportException::ALREADY_OPEN); + } + + if (empty($this->host_)) { + throw new TTransportException('Cannot open null host', TTransportException::NOT_OPEN); + } + + if ($this->port_ <= 0) { + throw new TTransportException('Cannot open without port', TTransportException::NOT_OPEN); + } + + if ($this->persist_) { + $this->handle_ = @pfsockopen( + $this->host_, + $this->port_, + $errno, + $errstr, + $this->sendTimeoutSec_ + ($this->sendTimeoutUsec_ / 1000000) + ); + } else { + $this->handle_ = @fsockopen( + $this->host_, + $this->port_, + $errno, + $errstr, + $this->sendTimeoutSec_ + ($this->sendTimeoutUsec_ / 1000000) + ); + } + + // Connect failed? + if ($this->handle_ === false) { + $error = 'TSocket: Could not connect to ' . + $this->host_ . ':' . 
$this->port_ . ' (' . $errstr . ' [' . $errno . '])'; + if ($this->debug_) { + call_user_func($this->debugHandler_, $error); + } + throw new TException($error); + } + + if (function_exists('socket_import_stream') && function_exists('socket_set_option')) { + $socket = socket_import_stream($this->handle_); + socket_set_option($socket, SOL_TCP, TCP_NODELAY, 1); + } + } + + /** + * Closes the socket. + */ + public function close() + { + @fclose($this->handle_); + $this->handle_ = null; + } + + /** + * Read from the socket at most $len bytes. + * + * This method will not wait for all the requested data, it will return as + * soon as any data is received. + * + * @param int $len Maximum number of bytes to read. + * @return string Binary data + */ + public function read($len) + { + $null = null; + $read = array($this->handle_); + $readable = @stream_select( + $read, + $null, + $null, + $this->recvTimeoutSec_, + $this->recvTimeoutUsec_ + ); + + if ($readable > 0) { + $data = fread($this->handle_, $len); + if ($data === false) { + throw new TTransportException('TSocket: Could not read ' . $len . ' bytes from ' . + $this->host_ . ':' . $this->port_); + } elseif ($data == '' && feof($this->handle_)) { + throw new TTransportException('TSocket read 0 bytes'); + } + + return $data; + } elseif ($readable === 0) { + throw new TTransportException('TSocket: timed out reading ' . $len . ' bytes from ' . + $this->host_ . ':' . $this->port_); + } else { + throw new TTransportException('TSocket: Could not read ' . $len . ' bytes from ' . + $this->host_ . ':' . $this->port_); + } + } + + /** + * Write to the socket. 
+ * + * @param string $buf The data to write + */ + public function write($buf) + { + $null = null; + $write = array($this->handle_); + + // keep writing until all the data has been written + while (TStringFuncFactory::create()->strlen($buf) > 0) { + // wait for stream to become available for writing + $writable = @stream_select( + $null, + $write, + $null, + $this->sendTimeoutSec_, + $this->sendTimeoutUsec_ + ); + if ($writable > 0) { + // write buffer to stream + $written = fwrite($this->handle_, $buf); + if ($written === -1 || $written === false) { + throw new TTransportException( + 'TSocket: Could not write ' . TStringFuncFactory::create()->strlen($buf) . ' bytes ' . + $this->host_ . ':' . $this->port_ + ); + } + // determine how much of the buffer is left to write + $buf = TStringFuncFactory::create()->substr($buf, $written); + } elseif ($writable === 0) { + throw new TTransportException( + 'TSocket: timed out writing ' . TStringFuncFactory::create()->strlen($buf) . ' bytes from ' . + $this->host_ . ':' . $this->port_ + ); + } else { + throw new TTransportException( + 'TSocket: Could not write ' . TStringFuncFactory::create()->strlen($buf) . ' bytes ' . + $this->host_ . ':' . $this->port_ + ); + } + } + } + + /** + * Flush output to the socket. + * + * Since read(), readAll() and write() operate on the sockets directly, + * this is a no-op + * + * If you wish to have flushable buffering behaviour, wrap this TSocket + * in a TBufferedTransport. 
+ */ + public function flush() + { + // no-op + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocketPool.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocketPool.php new file mode 100644 index 000000000..cb9e8ddfa --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TSocketPool.php @@ -0,0 +1,310 @@ + $val) { + $ports[$key] = $port; + } + } + + foreach ($hosts as $key => $host) { + $this->servers_ [] = array('host' => $host, + 'port' => $ports[$key]); + } + } + + /** + * Add a server to the pool + * + * This function does not prevent you from adding a duplicate server entry. + * + * @param string $host hostname or IP + * @param int $port port + */ + public function addServer($host, $port) + { + $this->servers_[] = array('host' => $host, 'port' => $port); + } + + /** + * Sets how many time to keep retrying a host in the connect function. + * + * @param int $numRetries + */ + public function setNumRetries($numRetries) + { + $this->numRetries_ = $numRetries; + } + + /** + * Sets how long to wait until retrying a host if it was marked down + * + * @param int $numRetries + */ + public function setRetryInterval($retryInterval) + { + $this->retryInterval_ = $retryInterval; + } + + /** + * Sets how many time to keep retrying a host before marking it as down. + * + * @param int $numRetries + */ + public function setMaxConsecutiveFailures($maxConsecutiveFailures) + { + $this->maxConsecutiveFailures_ = $maxConsecutiveFailures; + } + + /** + * Turns randomization in connect order on or off. + * + * @param bool $randomize + */ + public function setRandomize($randomize) + { + $this->randomize_ = $randomize; + } + + /** + * Whether to always try the last server. 
+ * + * @param bool $alwaysTryLast + */ + public function setAlwaysTryLast($alwaysTryLast) + { + $this->alwaysTryLast_ = $alwaysTryLast; + } + + /** + * Connects the socket by iterating through all the servers in the pool + * and trying to find one that works. + */ + public function open() + { + // Check if we want order randomization + if ($this->randomize_) { + shuffle($this->servers_); + } + + // Count servers to identify the "last" one + $numServers = count($this->servers_); + + for ($i = 0; $i < $numServers; ++$i) { + // This extracts the $host and $port variables + extract($this->servers_[$i]); + + // Check APC cache for a record of this server being down + $failtimeKey = 'thrift_failtime:' . $host . ':' . $port . '~'; + + // Cache miss? Assume it's OK + $lastFailtime = apc_fetch($failtimeKey); + if ($lastFailtime === false) { + $lastFailtime = 0; + } + + $retryIntervalPassed = false; + + // Cache hit...make sure enough the retry interval has elapsed + if ($lastFailtime > 0) { + $elapsed = time() - $lastFailtime; + if ($elapsed > $this->retryInterval_) { + $retryIntervalPassed = true; + if ($this->debug_) { + call_user_func( + $this->debugHandler_, + 'TSocketPool: retryInterval ' . + '(' . $this->retryInterval_ . ') ' . + 'has passed for host ' . $host . ':' . 
$port + ); + } + } + } + + // Only connect if not in the middle of a fail interval, OR if this + // is the LAST server we are trying, just hammer away on it + $isLastServer = false; + if ($this->alwaysTryLast_) { + $isLastServer = ($i == ($numServers - 1)); + } + + if (($lastFailtime === 0) || + ($isLastServer) || + ($lastFailtime > 0 && $retryIntervalPassed)) { + // Set underlying TSocket params to this one + $this->host_ = $host; + $this->port_ = $port; + + // Try up to numRetries_ connections per server + for ($attempt = 0; $attempt < $this->numRetries_; $attempt++) { + try { + // Use the underlying TSocket open function + parent::open(); + + // Only clear the failure counts if required to do so + if ($lastFailtime > 0) { + apc_store($failtimeKey, 0); + } + + // Successful connection, return now + return; + } catch (TException $tx) { + // Connection failed + } + } + + // Mark failure of this host in the cache + $consecfailsKey = 'thrift_consecfails:' . $host . ':' . $port . '~'; + + // Ignore cache misses + $consecfails = apc_fetch($consecfailsKey); + if ($consecfails === false) { + $consecfails = 0; + } + + // Increment by one + $consecfails++; + + // Log and cache this failure + if ($consecfails >= $this->maxConsecutiveFailures_) { + if ($this->debug_) { + call_user_func( + $this->debugHandler_, + 'TSocketPool: marking ' . $host . ':' . $port . + ' as down for ' . $this->retryInterval_ . ' secs ' . + 'after ' . $consecfails . ' failed attempts.' + ); + } + // Store the failure time + apc_store($failtimeKey, time()); + + // Clear the count of consecutive failures + apc_store($consecfailsKey, 0); + } else { + apc_store($consecfailsKey, $consecfails); + } + } + } + + // Oh no; we failed them all. The system is totally ill! + $error = 'TSocketPool: All hosts in pool are down. '; + $hosts = array(); + foreach ($this->servers_ as $server) { + $hosts [] = $server['host'] . ':' . $server['port']; + } + $hostlist = implode(',', $hosts); + $error .= '(' . $hostlist . 
')'; + if ($this->debug_) { + call_user_func($this->debugHandler_, $error); + } + throw new TException($error); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TTransport.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TTransport.php new file mode 100644 index 000000000..35921c666 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Transport/TTransport.php @@ -0,0 +1,98 @@ +read($len); + + $data = ''; + $got = 0; + while (($got = TStringFuncFactory::create()->strlen($data)) < $len) { + $data .= $this->read($len - $got); + } + + return $data; + } + + /** + * Writes the given data out. + * + * @param string $buf The data to write + * @throws TTransportException if writing fails + */ + abstract public function write($buf); + + /** + * Flushes any pending data out of a buffer + * + * @throws TTransportException if a writing error occurs + */ + public function flush() + { + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TConstant.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Type/TConstant.php similarity index 91% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TConstant.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Type/TConstant.php index 7c8eceb03..215da4a3d 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TConstant.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Type/TConstant.php @@ -30,7 +30,9 @@ abstract class TConstant /** * Don't instanciate this class */ - protected function __construct() {} + protected function __construct() + { + } /** * Get a constant value @@ -41,8 +43,8 @@ abstract class TConstant { if (is_null(static::$$constant)) { static::$$constant = call_user_func( - sprintf('static::init_%s', $constant) - ); + sprintf('static::init_%s', $constant) + ); } return static::$$constant; diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TMessageType.php 
b/vendor/git.apache.org/thrift.git/lib/php/lib/Type/TMessageType.php similarity index 91% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TMessageType.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Type/TMessageType.php index bff224f88..dc9ae6284 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TMessageType.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Type/TMessageType.php @@ -27,8 +27,8 @@ namespace Thrift\Type; */ class TMessageType { - const CALL = 1; - const REPLY = 2; - const EXCEPTION = 3; - const ONEWAY = 4; + const CALL = 1; + const REPLY = 2; + const EXCEPTION = 3; + const ONEWAY = 4; } diff --git a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TType.php b/vendor/git.apache.org/thrift.git/lib/php/lib/Type/TType.php similarity index 68% rename from vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TType.php rename to vendor/git.apache.org/thrift.git/lib/php/lib/Type/TType.php index 71219c249..3fdb15f53 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/lib/Thrift/Type/TType.php +++ b/vendor/git.apache.org/thrift.git/lib/php/lib/Type/TType.php @@ -27,21 +27,21 @@ namespace Thrift\Type; */ class TType { - const STOP = 0; - const VOID = 1; - const BOOL = 2; - const BYTE = 3; - const I08 = 3; - const DOUBLE = 4; - const I16 = 6; - const I32 = 8; - const I64 = 10; - const STRING = 11; - const UTF7 = 11; - const STRUCT = 12; - const MAP = 13; - const SET = 14; - const LST = 15; // N.B. cannot use LIST keyword in PHP! - const UTF8 = 16; - const UTF16 = 17; + const STOP = 0; + const VOID = 1; + const BOOL = 2; + const BYTE = 3; + const I08 = 3; + const DOUBLE = 4; + const I16 = 6; + const I32 = 8; + const I64 = 10; + const STRING = 11; + const UTF7 = 11; + const STRUCT = 12; + const MAP = 13; + const SET = 14; + const LST = 15; // N.B. cannot use LIST keyword in PHP! 
+ const UTF8 = 16; + const UTF16 = 17; } diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Fixtures.php b/vendor/git.apache.org/thrift.git/lib/php/test/Fixtures.php new file mode 100644 index 000000000..fd57d831c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Fixtures.php @@ -0,0 +1,194 @@ +<><"; + + self::$testArgs['testString3'] = + "string that ends in double-backslash \\\\"; + + self::$testArgs['testUnicodeStringWithNonBMP'] = + "สวัสดี/𝒯"; + + self::$testArgs['testDouble'] = 3.1415926535898; + + // TODO: add testBinary() call + + self::$testArgs['testByte'] = 0x01; + + self::$testArgs['testI32'] = pow(2, 30); + + if (PHP_INT_SIZE == 8) { + self::$testArgs['testI64'] = pow(2, 60); + } else { + self::$testArgs['testI64'] = "1152921504606847000"; + } + + self::$testArgs['testStruct'] = + new Xtruct( + array( + 'string_thing' => 'worked', + 'byte_thing' => 0x01, + 'i32_thing' => pow(2, 30), + 'i64_thing' => self::$testArgs['testI64'] + ) + ); + + self::$testArgs['testNestNested'] = + new Xtruct( + array( + 'string_thing' => 'worked', + 'byte_thing' => 0x01, + 'i32_thing' => pow(2, 30), + 'i64_thing' => self::$testArgs['testI64'] + ) + ); + + self::$testArgs['testNest'] = + new Xtruct2( + array( + 'byte_thing' => 0x01, + 'struct_thing' => self::$testArgs['testNestNested'], + 'i32_thing' => pow(2, 15) + ) + ); + + self::$testArgs['testMap'] = + array( + 7 => 77, + 8 => 88, + 9 => 99 + ); + + self::$testArgs['testStringMap'] = + array( + "a" => "123", + "a b" => "with spaces ", + "same" => "same", + "0" => "numeric key", + "longValue" => self::$testArgs['testString1'], + self::$testArgs['testString1'] => "long key" + ); + + self::$testArgs['testSet'] = array(1 => true, 5 => true, 6 => true); + + self::$testArgs['testList'] = array(1, 2, 3); + + self::$testArgs['testEnum'] = Numberz::ONE; + + self::$testArgs['testTypedef'] = 69; + + self::$testArgs['testMapMapExpectedResult'] = + array( + 4 => array( + 1 => 1, + 2 => 2, + 3 => 3, + 4 => 4, 
+ ), + -4 => array( + -4 => -4, + -3 => -3, + -2 => -2, + -1 => -1 + ) + ); + + // testInsanity ... takes a few steps to set up! + + $xtruct1 = + new Xtruct( + array( + 'string_thing' => 'Goodbye4', + 'byte_thing' => 4, + 'i32_thing' => 4, + 'i64_thing' => 4 + ) + ); + + $xtruct2 = + new Xtruct( + array( + 'string_thing' => 'Hello2', + 'byte_thing' => 2, + 'i32_thing' => 2, + 'i64_thing' => 2 + ) + ); + + $userMap = + array( + Numberz::FIVE => 5, + Numberz::EIGHT => 8 + ); + + $insanity2 = + new Insanity( + array( + 'userMap' => $userMap, + 'xtructs' => array($xtruct1, $xtruct2) + ) + ); + + $insanity3 = $insanity2; + + $insanity6 = + new Insanity( + array( + 'userMap' => null, + 'xtructs' => null + ) + ); + + self::$testArgs['testInsanityExpectedResult'] = + array( + "1" => array( + Numberz::TWO => $insanity2, + Numberz::THREE => $insanity3 + ), + "2" => array( + Numberz::SIX => $insanity6 + ) + ); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/JsonSerialize/JsonSerializeTest.php b/vendor/git.apache.org/thrift.git/lib/php/test/JsonSerialize/JsonSerializeTest.php new file mode 100644 index 000000000..c6686525f --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/JsonSerialize/JsonSerializeTest.php @@ -0,0 +1,116 @@ +markTestSkipped('Requires PHP 5.4 or newer!'); + } + /** @var \Composer\Autoload\ClassLoader $loader */ + $loader = require __DIR__ . '/../../../../vendor/autoload.php'; + $loader->addPsr4('', __DIR__ . '/../packages/phpjs'); + } + + public function testEmptyStruct() + { + $empty = new \ThriftTest\EmptyStruct(array('non_existing_key' => 'bar')); + $this->assertEquals(new stdClass(), json_decode(json_encode($empty))); + } + + public function testStringsAndInts() + { + $input = array( + 'string_thing' => 'foo', + 'i64_thing' => 1234567890, + ); + $xtruct = new \ThriftTest\Xtruct($input); + + // Xtruct's 'i32_thing' and 'byte_thing' fields should not be present here! 
+ $expected = new stdClass(); + $expected->string_thing = $input['string_thing']; + $expected->i64_thing = $input['i64_thing']; + $this->assertEquals($expected, json_decode(json_encode($xtruct))); + } + + public function testNestedStructs() + { + $xtruct2 = new \ThriftTest\Xtruct2(array( + 'byte_thing' => 42, + 'struct_thing' => new \ThriftTest\Xtruct(array( + 'i32_thing' => 123456, + )), + )); + + $expected = new stdClass(); + $expected->byte_thing = $xtruct2->byte_thing; + $expected->struct_thing = new stdClass(); + $expected->struct_thing->i32_thing = $xtruct2->struct_thing->i32_thing; + $this->assertEquals($expected, json_decode(json_encode($xtruct2))); + } + + public function testInsanity() + { + $xinput = array('string_thing' => 'foo'); + $xtruct = new \ThriftTest\Xtruct($xinput); + $insanity = new \ThriftTest\Insanity(array( + 'xtructs' => array($xtruct, $xtruct, $xtruct) + )); + $expected = new stdClass(); + $expected->xtructs = array((object)$xinput, (object)$xinput, (object)$xinput); + $this->assertEquals($expected, json_decode(json_encode($insanity))); + } + + public function testNestedLists() + { + $bonk = new \ThriftTest\Bonk(array('message' => 'foo')); + $nested = new \ThriftTest\NestedListsBonk(array('bonk' => array(array(array($bonk))))); + $expected = new stdClass(); + $expected->bonk = array(array(array((object)array('message' => 'foo')))); + $this->assertEquals($expected, json_decode(json_encode($nested))); + } + + public function testMaps() + { + $intmap = new \ThriftTest\ThriftTest_testMap_args(['thing' => [0 => 'zero']]); + $emptymap = new \ThriftTest\ThriftTest_testMap_args([]); + $this->assertEquals('{"thing":{"0":"zero"}}', json_encode($intmap)); + $this->assertEquals('{}', json_encode($emptymap)); + } + + public function testScalarTypes() + { + $b = new \ThriftTest\Bools(['im_true' => '1', 'im_false' => '0']); + $this->assertEquals('{"im_true":true,"im_false":false}', json_encode($b)); + $s = new \ThriftTest\StructA(['s' => 42]); + 
$this->assertEquals('{"s":"42"}', json_encode($s)); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Makefile.am b/vendor/git.apache.org/thrift.git/lib/php/test/Makefile.am index c872b1aa1..482468826 100755 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Makefile.am +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Makefile.am @@ -17,9 +17,11 @@ # under the License. # +PHPUNIT=php $(top_srcdir)/vendor/bin/phpunit + stubs: ../../../test/ThriftTest.thrift TestValidators.thrift - mkdir -p ./packages - $(THRIFT) --gen php -r --out ./packages ../../../test/ThriftTest.thrift + mkdir -p ./packages/php + $(THRIFT) --gen php -r --out ./packages/php ../../../test/ThriftTest.thrift mkdir -p ./packages/phpv mkdir -p ./packages/phpvo mkdir -p ./packages/phpjs @@ -27,23 +29,21 @@ stubs: ../../../test/ThriftTest.thrift TestValidators.thrift $(THRIFT) --gen php:validate,oop -r --out ./packages/phpvo TestValidators.thrift $(THRIFT) --gen php:json -r --out ./packages/phpjs TestValidators.thrift -check-json-serializer: stubs -if HAVE_PHPUNIT - $(PHPUNIT) --log-junit=TEST-json-serializer.xml Test/Thrift/JsonSerialize/ -endif +deps: $(top_srcdir)/composer.json + composer install --working-dir=$(top_srcdir) -check-validator: stubs - php Test/Thrift/TestValidators.php - php Test/Thrift/TestValidators.php -oop +all-local: deps -check-protocol: stubs -if HAVE_PHPUNIT - $(PHPUNIT) --log-junit=TEST-log-json-protocol.xml Test/Thrift/Protocol/TestTJSONProtocol.php - $(PHPUNIT) --log-junit=TEST-binary-serializer.xml Test/Thrift/Protocol/TestBinarySerializer.php - $(PHPUNIT) --log-junit=TEST-log-simple-json-protocol.xml Test/Thrift/Protocol/TestTSimpleJSONProtocol.php -endif +check-json-serializer: deps stubs + $(PHPUNIT) --log-junit=TEST-log-json-serializer.xml JsonSerialize/ -check: stubs \ +check-validator: deps stubs + $(PHPUNIT) --log-junit=TEST-log-validator.xml Validator/ + +check-protocol: deps stubs + $(PHPUNIT) --log-junit=TEST-log-protocol.xml Protocol/ + 
+check: deps stubs \ check-protocol \ check-validator \ check-json-serializer @@ -51,9 +51,3 @@ check: stubs \ clean-local: $(RM) -r ./packages $(RM) TEST-*.xml - -EXTRA_DIST = \ - Test \ - TestValidators.thrift - - diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestBinarySerializer.php b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/BinarySerializerTest.php similarity index 54% rename from vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestBinarySerializer.php rename to vendor/git.apache.org/thrift.git/lib/php/test/Protocol/BinarySerializerTest.php index a9832162b..71b0bb506 100644 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestBinarySerializer.php +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/BinarySerializerTest.php @@ -23,16 +23,10 @@ namespace Test\Thrift\Protocol; -use Thrift\ClassLoader\ThriftClassLoader; +use PHPUnit\Framework\TestCase; use Thrift\Serializer\TBinarySerializer; -require_once __DIR__.'/../../../../lib/Thrift/ClassLoader/ThriftClassLoader.php'; - -$loader = new ThriftClassLoader(); -$loader->registerNamespace('Thrift', __DIR__ . '/../../../../lib'); -$loader->registerNamespace('Test', __DIR__ . '/../../..'); -$loader->registerDefinition('ThriftTest', __DIR__ . '/../../../packages'); -$loader->register(); +require __DIR__ . '/../../../../vendor/autoload.php'; /*** * This test suite depends on running the compiler against the @@ -40,25 +34,27 @@ $loader->register(); * * lib/php/test$ ../../../compiler/cpp/thrift --gen php -r \ * --out ./packages ../../../test/ThriftTest.thrift + * + * @runTestsInSeparateProcesses */ - -class TestBinarySerializer extends \PHPUnit_Framework_TestCase +class BinarySerializerTest extends TestCase { + public function setUp() + { + /** @var \Composer\Autoload\ClassLoader $loader */ + $loader = require __DIR__ . '/../../../../vendor/autoload.php'; + $loader->addPsr4('', __DIR__ . 
'/../packages/php'); + } - public function setUp() - { - } - - /** - * We try to serialize and deserialize a random object to make sure no exceptions are thrown. - * @see THRIFT-1579 - */ - public function testBinarySerializer() - { - $struct = new \ThriftTest\Xtruct(array('string_thing' => 'abc')); - $serialized = TBinarySerializer::serialize($struct, 'ThriftTest\\Xtruct'); - $deserialized = TBinarySerializer::deserialize($serialized, 'ThriftTest\\Xtruct'); - $this->assertEquals($struct, $deserialized); - } - + /** + * We try to serialize and deserialize a random object to make sure no exceptions are thrown. + * @see THRIFT-1579 + */ + public function testBinarySerializer() + { + $struct = new \ThriftTest\Xtruct(array('string_thing' => 'abc')); + $serialized = TBinarySerializer::serialize($struct, 'ThriftTest\\Xtruct'); + $deserialized = TBinarySerializer::deserialize($serialized, 'ThriftTest\\Xtruct'); + $this->assertEquals($struct, $deserialized); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolFixtures.php b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolFixtures.php new file mode 100644 index 000000000..dd9039fca --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolFixtures.php @@ -0,0 +1,74 @@ +<><"}}'; + + self::$testArgsJSON['testString3'] = '{"1":{"str":"string that ends in double-backslash \\\\\\\\"}}'; + + self::$testArgsJSON['testUnicodeStringWithNonBMP'] = '{"1":{"str":"สวัสดี\/𝒯"}}'; + + self::$testArgsJSON['testDouble'] = '{"1":{"dbl":3.1415926535898}}'; + + self::$testArgsJSON['testByte'] = '{"1":{"i8":1}}'; + + self::$testArgsJSON['testI32'] = '{"1":{"i32":1073741824}}'; + + if (PHP_INT_SIZE == 8) { + self::$testArgsJSON['testI64'] = '{"1":{"i64":' . pow(2, 60) . '}}'; + self::$testArgsJSON['testStruct'] = '{"1":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":' . pow(2, 60) . 
'}}}}'; + self::$testArgsJSON['testNest'] = '{"1":{"rec":{"1":{"i8":1},"2":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":' . pow(2, 60) . '}}},"3":{"i32":32768}}}}'; + } else { + self::$testArgsJSON['testI64'] = '{"1":{"i64":1152921504606847000}}'; + self::$testArgsJSON['testStruct'] = '{"1":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":1152921504606847000}}}}'; + self::$testArgsJSON['testNest'] = '{"1":{"rec":{"1":{"i8":1},"2":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":1152921504606847000}}},"3":{"i32":32768}}}}'; + } + + self::$testArgsJSON['testMap'] = '{"1":{"map":["i32","i32",3,{"7":77,"8":88,"9":99}]}}'; + + self::$testArgsJSON['testStringMap'] = '{"1":{"map":["str","str",6,{"a":"123","a b":"with spaces ","same":"same","0":"numeric key","longValue":"Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, 
F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, 
\u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e","Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, 
\u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e":"long key"}]}}'; + + self::$testArgsJSON['testSet'] = '{"1":{"set":["i32",3,1,5,6]}}'; + + self::$testArgsJSON['testList'] = '{"1":{"lst":["i32",3,1,2,3]}}'; + + self::$testArgsJSON['testEnum'] = 
'{"1":{"i32":1}}'; + + self::$testArgsJSON['testTypedef'] = '{"1":{"i64":69}}'; + + self::$testArgsJSON['testMapMap'] = '{"0":{"map":["i32","map",2,{"4":["i32","i32",4,{"1":1,"2":2,"3":3,"4":4}],"-4":["i32","i32",4,{"-4":-4,"-3":-3,"-2":-2,"-1":-1}]}]}}'; + + self::$testArgsJSON['testInsanity'] = '{"0":{"map":["i64","map",2,{"1":["i32","rec",2,{"2":{"1":{"map":["i32","i64",2,{"5":5,"8":8}]},"2":{"lst":["rec",2,{"1":{"str":"Goodbye4"},"4":{"i8":4},"9":{"i32":4},"11":{"i64":4}},{"1":{"str":"Hello2"},"4":{"i8":2},"9":{"i32":2},"11":{"i64":2}}]}},"3":{"1":{"map":["i32","i64",2,{"5":5,"8":8}]},"2":{"lst":["rec",2,{"1":{"str":"Goodbye4"},"4":{"i8":4},"9":{"i32":4},"11":{"i64":4}},{"1":{"str":"Hello2"},"4":{"i8":2},"9":{"i32":2},"11":{"i64":2}}]}}}],"2":["i32","rec",1,{"6":{}}]}]}}'; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolTest.php b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolTest.php new file mode 100644 index 000000000..bf0ecce42 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TJSONProtocolTest.php @@ -0,0 +1,518 @@ +addPsr4('', __DIR__ . 
'/../packages/php'); + + Fixtures::populateTestArgs(); + TJSONProtocolFixtures::populateTestArgsJSON(); + } + + public function setUp() + { + $this->transport = new TMemoryBuffer(); + $this->protocol = new TJSONProtocol($this->transport); + $this->transport->open(); + } + + /** + * WRITE TESTS + */ + public function testVoidWrite() + { + $args = new \ThriftTest\ThriftTest_testVoid_args(); + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testVoid']; + + $this->assertEquals($expected, $actual); + } + + public function testString1Write() + { + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->thing = Fixtures::$testArgs['testString1']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testString1']; + + $this->assertEquals($expected, $actual); + } + + public function testString2Write() + { + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->thing = Fixtures::$testArgs['testString2']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testString2']; + + $this->assertEquals($expected, $actual); + } + + public function testDoubleWrite() + { + $args = new \ThriftTest\ThriftTest_testDouble_args(); + $args->thing = Fixtures::$testArgs['testDouble']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testDouble']; + + $this->assertEquals($expected, $actual); + } + + public function testByteWrite() + { + $args = new \ThriftTest\ThriftTest_testByte_args(); + $args->thing = Fixtures::$testArgs['testByte']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testByte']; + + 
$this->assertEquals($expected, $actual); + } + + public function testI32Write() + { + $args = new \ThriftTest\ThriftTest_testI32_args(); + $args->thing = Fixtures::$testArgs['testI32']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testI32']; + + $this->assertEquals($expected, $actual); + } + + public function testI64Write() + { + $args = new \ThriftTest\ThriftTest_testI64_args(); + $args->thing = Fixtures::$testArgs['testI64']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testI64']; + + $this->assertEquals($expected, $actual); + } + + public function testStructWrite() + { + $args = new \ThriftTest\ThriftTest_testStruct_args(); + $args->thing = Fixtures::$testArgs['testStruct']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testStruct']; + + $this->assertEquals($expected, $actual); + } + + public function testNestWrite() + { + $args = new \ThriftTest\ThriftTest_testNest_args(); + $args->thing = Fixtures::$testArgs['testNest']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testNest']; + + $this->assertEquals($expected, $actual); + } + + public function testMapWrite() + { + $args = new \ThriftTest\ThriftTest_testMap_args(); + $args->thing = Fixtures::$testArgs['testMap']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testMap']; + + $this->assertEquals($expected, $actual); + } + + public function testStringMapWrite() + { + $args = new \ThriftTest\ThriftTest_testStringMap_args(); + $args->thing = Fixtures::$testArgs['testStringMap']; + + $args->write($this->protocol); + + $actual = 
$this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testStringMap']; + + /* + * The $actual returns unescaped string. + * It is required to to decode then encode it again + * to get the expected escaped unicode. + */ + $this->assertEquals($expected, json_encode(json_decode($actual))); + } + + public function testSetWrite() + { + $args = new \ThriftTest\ThriftTest_testSet_args(); + $args->thing = Fixtures::$testArgs['testSet']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testSet']; + + $this->assertEquals($expected, $actual); + } + + public function testListWrite() + { + $args = new \ThriftTest\ThriftTest_testList_args(); + $args->thing = Fixtures::$testArgs['testList']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testList']; + + $this->assertEquals($expected, $actual); + } + + public function testEnumWrite() + { + $args = new \ThriftTest\ThriftTest_testEnum_args(); + $args->thing = Fixtures::$testArgs['testEnum']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testEnum']; + + $this->assertEquals($expected, $actual); + } + + public function testTypedefWrite() + { + $args = new \ThriftTest\ThriftTest_testTypedef_args(); + $args->thing = Fixtures::$testArgs['testTypedef']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testTypedef']; + + $this->assertEquals($expected, $actual); + } + + /** + * READ TESTS + */ + public function testVoidRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testVoid'] + ); + $args = new \ThriftTest\ThriftTest_testVoid_args(); + $args->read($this->protocol); + } + + public function 
testString1Read() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testString1'] + ); + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testString1']; + + $this->assertEquals($expected, $actual); + } + + public function testString2Read() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testString2'] + ); + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testString2']; + + $this->assertEquals($expected, $actual); + } + + public function testString3Write() + { + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->thing = Fixtures::$testArgs['testString3']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testString3']; + + $this->assertEquals($expected, $actual); + } + + public function testString4Write() + { + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->thing = Fixtures::$testArgs['testUnicodeStringWithNonBMP']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TJSONProtocolFixtures::$testArgsJSON['testUnicodeStringWithNonBMP']; + + $this->assertEquals($expected, $actual); + } + + public function testDoubleRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testDouble'] + ); + $args = new \ThriftTest\ThriftTest_testDouble_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testDouble']; + + $this->assertEquals($expected, $actual); + } + + public function testByteRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testByte'] + ); + $args = new \ThriftTest\ThriftTest_testByte_args(); + $args->read($this->protocol); + + $actual = $args->thing; + 
$expected = Fixtures::$testArgs['testByte']; + + $this->assertEquals($expected, $actual); + } + + public function testI32Read() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testI32'] + ); + $args = new \ThriftTest\ThriftTest_testI32_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testI32']; + + $this->assertEquals($expected, $actual); + } + + public function testI64Read() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testI64'] + ); + $args = new \ThriftTest\ThriftTest_testI64_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testI64']; + + $this->assertEquals($expected, $actual); + } + + public function testStructRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testStruct'] + ); + $args = new \ThriftTest\ThriftTest_testStruct_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testStruct']; + + $this->assertEquals($expected, $actual); + } + + public function testNestRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testNest'] + ); + $args = new \ThriftTest\ThriftTest_testNest_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testNest']; + + $this->assertEquals($expected, $actual); + } + + public function testMapRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testMap'] + ); + $args = new \ThriftTest\ThriftTest_testMap_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testMap']; + + $this->assertEquals($expected, $actual); + } + + public function testStringMapRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testStringMap'] + ); + $args = new \ThriftTest\ThriftTest_testStringMap_args(); + $args->read($this->protocol); + + $actual = 
$args->thing; + $expected = Fixtures::$testArgs['testStringMap']; + + $this->assertEquals($expected, $actual); + } + + public function testSetRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testSet'] + ); + $args = new \ThriftTest\ThriftTest_testSet_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testSet']; + + $this->assertEquals($expected, $actual); + } + + public function testListRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testList'] + ); + $args = new \ThriftTest\ThriftTest_testList_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testList']; + + $this->assertEquals($expected, $actual); + } + + public function testEnumRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testEnum'] + ); + $args = new \ThriftTest\ThriftTest_testEnum_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testEnum']; + + $this->assertEquals($expected, $actual); + } + + public function testTypedefRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testTypedef'] + ); + $args = new \ThriftTest\ThriftTest_testTypedef_args(); + $args->read($this->protocol); + + $actual = $args->thing; + $expected = Fixtures::$testArgs['testTypedef']; + + $this->assertEquals($expected, $actual); + } + + public function testMapMapRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testMapMap'] + ); + $result = new \ThriftTest\ThriftTest_testMapMap_result(); + $result->read($this->protocol); + + $actual = $result->success; + $expected = Fixtures::$testArgs['testMapMapExpectedResult']; + + $this->assertEquals($expected, $actual); + } + + public function testInsanityRead() + { + $this->transport->write( + TJSONProtocolFixtures::$testArgsJSON['testInsanity'] + ); + $result = new 
\ThriftTest\ThriftTest_testInsanity_result(); + $result->read($this->protocol); + + $actual = $result->success; + $expected = Fixtures::$testArgs['testInsanityExpectedResult']; + + $this->assertEquals($expected, $actual); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolFixtures.php b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolFixtures.php new file mode 100644 index 000000000..547fd8662 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolFixtures.php @@ -0,0 +1,67 @@ +<><"}'; + + self::$testArgsJSON['testDouble'] = '{"thing":3.1415926535898}'; + + self::$testArgsJSON['testByte'] = '{"thing":1}'; + + self::$testArgsJSON['testI32'] = '{"thing":1073741824}'; + + if (PHP_INT_SIZE == 8) { + self::$testArgsJSON['testI64'] = '{"thing":' . pow(2, 60) . '}'; + self::$testArgsJSON['testStruct'] = '{"thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":' . pow(2, 60) . '}}'; + self::$testArgsJSON['testNest'] = '{"thing":{"byte_thing":1,"struct_thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":' . pow(2, 60) . 
'},"i32_thing":32768}}'; + } else { + self::$testArgsJSON['testI64'] = '{"thing":1152921504606847000}'; + + self::$testArgsJSON['testStruct'] = '{"thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":1152921504606847000}}'; + self::$testArgsJSON['testNest'] = '{"thing":{"byte_thing":1,"struct_thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":1152921504606847000},"i32_thing":32768}}'; + } + + self::$testArgsJSON['testMap'] = '{"thing":{"7":77,"8":88,"9":99}}'; + + self::$testArgsJSON['testStringMap'] = '{"thing":{"a":"123","a b":"with spaces ","same":"same","0":"numeric key","longValue":"Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, 
\u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, 
\u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e","Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, 
\u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e":"long key"}}'; + + self::$testArgsJSON['testSet'] = '{"thing":[1,5,6]}'; + + self::$testArgsJSON['testList'] = '{"thing":[1,2,3]}'; + + self::$testArgsJSON['testEnum'] = '{"thing":1}'; + + self::$testArgsJSON['testTypedef'] = '{"thing":69}'; + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolTest.php 
b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolTest.php new file mode 100644 index 000000000..e4a13736e --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Protocol/TSimpleJSONProtocolTest.php @@ -0,0 +1,254 @@ +addPsr4('', __DIR__ . '/../packages/php'); + + Fixtures::populateTestArgs(); + TSimpleJSONProtocolFixtures::populateTestArgsSimpleJSON(); + } + + public function setUp() + { + $this->transport = new TMemoryBuffer(); + $this->protocol = new TSimpleJSONProtocol($this->transport); + $this->transport->open(); + } + + /** + * WRITE TESTS + */ + public function testVoidWrite() + { + $args = new \ThriftTest\ThriftTest_testVoid_args(); + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testVoid']; + + $this->assertEquals($expected, $actual); + } + + public function testString1Write() + { + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->thing = Fixtures::$testArgs['testString1']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testString1']; + + $this->assertEquals($expected, $actual); + } + + public function testString2Write() + { + $args = new \ThriftTest\ThriftTest_testString_args(); + $args->thing = Fixtures::$testArgs['testString2']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testString2']; + + $this->assertEquals($expected, $actual); + } + + public function testDoubleWrite() + { + $args = new \ThriftTest\ThriftTest_testDouble_args(); + $args->thing = Fixtures::$testArgs['testDouble']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testDouble']; + + $this->assertEquals($expected, $actual); + } + + public 
function testByteWrite() + { + $args = new \ThriftTest\ThriftTest_testByte_args(); + $args->thing = Fixtures::$testArgs['testByte']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testByte']; + + $this->assertEquals($expected, $actual); + } + + public function testI32Write() + { + $args = new \ThriftTest\ThriftTest_testI32_args(); + $args->thing = Fixtures::$testArgs['testI32']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testI32']; + + $this->assertEquals($expected, $actual); + } + + public function testI64Write() + { + $args = new \ThriftTest\ThriftTest_testI64_args(); + $args->thing = Fixtures::$testArgs['testI64']; + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testI64']; + + $this->assertEquals($expected, $actual); + } + + public function testStructWrite() + { + $args = new \ThriftTest\ThriftTest_testStruct_args(); + $args->thing = Fixtures::$testArgs['testStruct']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testStruct']; + + $this->assertEquals($expected, $actual); + } + + public function testNestWrite() + { + $args = new \ThriftTest\ThriftTest_testNest_args(); + $args->thing = Fixtures::$testArgs['testNest']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testNest']; + + $this->assertEquals($expected, $actual); + } + + public function testMapWrite() + { + $args = new \ThriftTest\ThriftTest_testMap_args(); + $args->thing = Fixtures::$testArgs['testMap']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + 
$expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testMap']; + + $this->assertEquals($expected, $actual); + } + + public function testStringMapWrite() + { + $args = new \ThriftTest\ThriftTest_testStringMap_args(); + $args->thing = Fixtures::$testArgs['testStringMap']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testStringMap']; + + $this->assertEquals($expected, $actual); + } + + public function testSetWrite() + { + $args = new \ThriftTest\ThriftTest_testSet_args(); + $args->thing = Fixtures::$testArgs['testSet']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testSet']; + + $this->assertEquals($expected, $actual); + } + + public function testListWrite() + { + $args = new \ThriftTest\ThriftTest_testList_args(); + $args->thing = Fixtures::$testArgs['testList']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testList']; + + $this->assertEquals($expected, $actual); + } + + public function testEnumWrite() + { + $args = new \ThriftTest\ThriftTest_testEnum_args(); + $args->thing = Fixtures::$testArgs['testEnum']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testEnum']; + + $this->assertEquals($expected, $actual); + } + + public function testTypedefWrite() + { + $args = new \ThriftTest\ThriftTest_testTypedef_args(); + $args->thing = Fixtures::$testArgs['testTypedef']; + + $args->write($this->protocol); + + $actual = $this->transport->read(Fixtures::$bufsize); + $expected = TSimpleJSONProtocolFixtures::$testArgsJSON['testTypedef']; + + $this->assertEquals($expected, $actual); + } +} diff --git 
a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Fixtures.php b/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Fixtures.php deleted file mode 100644 index 2c60a08f9..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Fixtures.php +++ /dev/null @@ -1,194 +0,0 @@ -<><"; - - self::$testArgs['testString3'] = - "string that ends in double-backslash \\\\"; - - self::$testArgs['testUnicodeStringWithNonBMP'] = - "สวัสดี/𝒯"; - - self::$testArgs['testDouble'] = 3.1415926535898; - - // TODO: add testBinary() call - - self::$testArgs['testByte'] = 0x01; - - self::$testArgs['testI32'] = pow( 2, 30 ); - - if (PHP_INT_SIZE == 8) { - self::$testArgs['testI64'] = pow( 2, 60 ); - } else { - self::$testArgs['testI64'] = "1152921504606847000"; - } - - self::$testArgs['testStruct'] = - new Xtruct( - array( - 'string_thing' => 'worked', - 'byte_thing' => 0x01, - 'i32_thing' => pow( 2, 30 ), - 'i64_thing' => self::$testArgs['testI64'] - ) - ); - - self::$testArgs['testNestNested'] = - new Xtruct( - array( - 'string_thing' => 'worked', - 'byte_thing' => 0x01, - 'i32_thing' => pow( 2, 30 ), - 'i64_thing' => self::$testArgs['testI64'] - ) - ); - - self::$testArgs['testNest'] = - new Xtruct2( - array( - 'byte_thing' => 0x01, - 'struct_thing' => self::$testArgs['testNestNested'], - 'i32_thing' => pow( 2, 15 ) - ) - ); - - self::$testArgs['testMap'] = - array( - 7 => 77, - 8 => 88, - 9 => 99 - ); - - self::$testArgs['testStringMap'] = - array( - "a" => "123", - "a b" => "with spaces ", - "same" => "same", - "0" => "numeric key", - "longValue" => self::$testArgs['testString1'], - self::$testArgs['testString1'] => "long key" - ); - - self::$testArgs['testSet'] = array( 1 => true, 5 => true, 6 => true ); - - self::$testArgs['testList'] = array( 1, 2, 3 ); - - self::$testArgs['testEnum'] = Numberz::ONE; - - self::$testArgs['testTypedef'] = 69; - - self::$testArgs['testMapMapExpectedResult'] = - array( - 4 => array( - 1 => 1, - 2 => 2, - 3 => 3, - 4 => 
4, - ), - -4 => array( - -4 => -4, - -3 => -3, - -2 => -2, - -1 => -1 - ) - ); - - // testInsanity ... takes a few steps to set up! - - $xtruct1 = - new Xtruct( - array( - 'string_thing' => 'Goodbye4', - 'byte_thing' => 4, - 'i32_thing' => 4, - 'i64_thing' => 4 - ) - ); - - $xtruct2 = - new Xtruct( - array( - 'string_thing' => 'Hello2', - 'byte_thing' =>2, - 'i32_thing' => 2, - 'i64_thing' => 2 - ) - ); - - $userMap = - array( - Numberz::FIVE => 5, - Numberz::EIGHT => 8 - ); - - $insanity2 = - new Insanity( - array( - 'userMap' => $userMap, - 'xtructs' => array($xtruct1,$xtruct2) - ) - ); - - $insanity3 = $insanity2; - - $insanity6 = - new Insanity( - array( - 'userMap' => null, - 'xtructs' => null - ) - ); - - self::$testArgs['testInsanityExpectedResult'] = - array( - "1" => array( - Numberz::TWO => $insanity2, - Numberz::THREE => $insanity3 - ), - "2" => array( - Numberz::SIX => $insanity6 - ) - ); - - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/JsonSerialize/JsonSerializeTest.php b/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/JsonSerialize/JsonSerializeTest.php deleted file mode 100644 index 2471b520b..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/JsonSerialize/JsonSerializeTest.php +++ /dev/null @@ -1,116 +0,0 @@ -registerNamespace('Thrift', __DIR__ . '/../../../../lib'); -$loader->registerNamespace('Test', __DIR__ . '/../../..'); -$loader->registerDefinition('ThriftTest', __DIR__ . 
'/../../../packages/phpjs'); -$loader->register(); - -class JsonSerializeTest extends \PHPUnit_Framework_TestCase -{ - protected function setUp() { - if (version_compare(phpversion(), '5.4', '<')) { - $this->markTestSkipped('Requires PHP 5.4 or newer!'); - } - } - - public function testEmptyStruct() - { - $empty = new \ThriftTest\EmptyStruct(array('non_existing_key' => 'bar')); - $this->assertEquals(new stdClass(), json_decode(json_encode($empty))); - } - - public function testStringsAndInts() - { - $input = array( - 'string_thing' => 'foo', - 'i64_thing' => 1234567890, - ); - $xtruct = new \ThriftTest\Xtruct($input); - - // Xtruct's 'i32_thing' and 'byte_thing' fields should not be present here! - $expected = new stdClass(); - $expected->string_thing = $input['string_thing']; - $expected->i64_thing = $input['i64_thing']; - $this->assertEquals($expected, json_decode(json_encode($xtruct))); - } - - public function testNestedStructs() - { - $xtruct2 = new \ThriftTest\Xtruct2(array( - 'byte_thing' => 42, - 'struct_thing' => new \ThriftTest\Xtruct(array( - 'i32_thing' => 123456, - )), - )); - - $expected = new stdClass(); - $expected->byte_thing = $xtruct2->byte_thing; - $expected->struct_thing = new stdClass(); - $expected->struct_thing->i32_thing = $xtruct2->struct_thing->i32_thing; - $this->assertEquals($expected, json_decode(json_encode($xtruct2))); - } - - public function testInsanity() - { - $xinput = array('string_thing' => 'foo'); - $xtruct = new \ThriftTest\Xtruct($xinput); - $insanity = new \ThriftTest\Insanity(array( - 'xtructs' => array($xtruct, $xtruct, $xtruct) - )); - $expected = new stdClass(); - $expected->xtructs = array((object) $xinput, (object) $xinput, (object) $xinput); - $this->assertEquals($expected, json_decode(json_encode($insanity))); - } - - public function testNestedLists() - { - $bonk = new \ThriftTest\Bonk(array('message' => 'foo')); - $nested = new \ThriftTest\NestedListsBonk(array('bonk' => array(array(array($bonk))))); - $expected = 
new stdClass(); - $expected->bonk = array(array(array((object) array('message' => 'foo')))); - $this->assertEquals($expected, json_decode(json_encode($nested))); - } - - public function testMaps() - { - $intmap = new \ThriftTest\ThriftTest_testMap_args(['thing' => [0 => 'zero']]); - $emptymap = new \ThriftTest\ThriftTest_testMap_args([]); - $this->assertEquals('{"thing":{"0":"zero"}}', json_encode($intmap)); - $this->assertEquals('{}', json_encode($emptymap)); - } - - public function testScalarTypes() - { - $b = new \ThriftTest\Bools(['im_true' => '1', 'im_false' => '0']); - $this->assertEquals('{"im_true":true,"im_false":false}', json_encode($b)); - $s = new \ThriftTest\StructA(['s' => 42]); - $this->assertEquals('{"s":"42"}', json_encode($s)); - } - -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTJSONProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTJSONProtocol.php deleted file mode 100755 index a4ca9d573..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTJSONProtocol.php +++ /dev/null @@ -1,583 +0,0 @@ -registerNamespace('Thrift', __DIR__ . '/../../../../lib'); -$loader->registerNamespace('Test', __DIR__ . '/../../..'); -$loader->registerDefinition('ThriftTest', __DIR__ . 
'/../../../packages'); -$loader->register(); - -/*** - * This test suite depends on running the compiler against the - * standard ThriftTest.thrift file: - * - * lib/php/test$ ../../../compiler/cpp/thrift --gen php -r \ - * --out ./packages ../../../test/ThriftTest.thrift - */ - -class TestTJSONProtocol extends \PHPUnit_Framework_TestCase -{ - private $transport; - private $protocol; - - public static function setUpBeforeClass() - { - Fixtures::populateTestArgs(); - TestTJSONProtocol_Fixtures::populateTestArgsJSON(); - } - - public function setUp() - { - $this->transport = new TMemoryBuffer(); - $this->protocol = new TJSONProtocol($this->transport); - $this->transport->open(); - } - - /*** - * WRITE TESTS - */ - - public function testVoid_Write() - { - $args = new \ThriftTest\ThriftTest_testVoid_args(); - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testVoid']; - - $this->assertEquals( $expected, $actual ); - } - - public function testString1_Write() - { - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->thing = Fixtures::$testArgs['testString1']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testString1']; - - #$this->assertEquals( $expected, $actual ); - } - - public function testString2_Write() - { - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->thing = Fixtures::$testArgs['testString2']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testString2']; - - $this->assertEquals( $expected, $actual ); - } - - public function testDouble_Write() - { - $args = new \ThriftTest\ThriftTest_testDouble_args(); - $args->thing = Fixtures::$testArgs['testDouble']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = 
TestTJSONProtocol_Fixtures::$testArgsJSON['testDouble']; - - $this->assertEquals( $expected, $actual ); - } - - public function testByte_Write() - { - $args = new \ThriftTest\ThriftTest_testByte_args(); - $args->thing = Fixtures::$testArgs['testByte']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testByte']; - - $this->assertEquals( $expected, $actual ); - } - - public function testI32_Write() - { - $args = new \ThriftTest\ThriftTest_testI32_args(); - $args->thing = Fixtures::$testArgs['testI32']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testI32']; - - $this->assertEquals( $expected, $actual ); - } - - public function testI64_Write() - { - $args = new \ThriftTest\ThriftTest_testI64_args(); - $args->thing = Fixtures::$testArgs['testI64']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testI64']; - - $this->assertEquals( $expected, $actual ); - } - - public function testStruct_Write() - { - $args = new \ThriftTest\ThriftTest_testStruct_args(); - $args->thing = Fixtures::$testArgs['testStruct']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testStruct']; - - $this->assertEquals( $expected, $actual ); - } - - public function testNest_Write() - { - $args = new \ThriftTest\ThriftTest_testNest_args(); - $args->thing = Fixtures::$testArgs['testNest']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testNest']; - - $this->assertEquals( $expected, $actual ); - } - - public function testMap_Write() - { - $args = new \ThriftTest\ThriftTest_testMap_args(); - $args->thing = Fixtures::$testArgs['testMap']; - - 
$args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testMap']; - - $this->assertEquals( $expected, $actual ); - } - - public function testStringMap_Write() - { - $args = new \ThriftTest\ThriftTest_testStringMap_args(); - $args->thing = Fixtures::$testArgs['testStringMap']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testStringMap']; - - /* - * The $actual returns unescaped string. - * It is required to to decode then encode it again - * to get the expected escaped unicode. - */ - $this->assertEquals( $expected, json_encode(json_decode($actual)) ); - } - - public function testSet_Write() - { - $args = new \ThriftTest\ThriftTest_testSet_args(); - $args->thing = Fixtures::$testArgs['testSet']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testSet']; - - $this->assertEquals( $expected, $actual ); - } - - public function testList_Write() - { - $args = new \ThriftTest\ThriftTest_testList_args(); - $args->thing = Fixtures::$testArgs['testList']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testList']; - - $this->assertEquals( $expected, $actual ); - } - - public function testEnum_Write() - { - $args = new \ThriftTest\ThriftTest_testEnum_args(); - $args->thing = Fixtures::$testArgs['testEnum']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testEnum']; - - $this->assertEquals( $expected, $actual ); - } - - public function testTypedef_Write() - { - $args = new \ThriftTest\ThriftTest_testTypedef_args(); - $args->thing = Fixtures::$testArgs['testTypedef']; - - $args->write( $this->protocol ); - - $actual = 
$this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testTypedef']; - - $this->assertEquals( $expected, $actual ); - } - - /*** - * READ TESTS - */ - - public function testVoid_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testVoid'] - ); - $args = new \ThriftTest\ThriftTest_testVoid_args(); - $args->read( $this->protocol ); - } - - public function testString1_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testString1'] - ); - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testString1']; - - $this->assertEquals( $expected, $actual ); - } - - public function testString2_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testString2'] - ); - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testString2']; - - $this->assertEquals( $expected, $actual ); - } - - public function testString3_Write() - { - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->thing = Fixtures::$testArgs['testString3']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testString3']; - - $this->assertEquals( $expected, $actual ); - } - - public function testString4_Write() - { - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->thing = Fixtures::$testArgs['testUnicodeStringWithNonBMP']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTJSONProtocol_Fixtures::$testArgsJSON['testUnicodeStringWithNonBMP']; - - $this->assertEquals( $expected, $actual ); - } - - public function testDouble_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testDouble'] - ); - 
$args = new \ThriftTest\ThriftTest_testDouble_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testDouble']; - - $this->assertEquals( $expected, $actual ); - } - - public function testByte_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testByte'] - ); - $args = new \ThriftTest\ThriftTest_testByte_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testByte']; - - $this->assertEquals( $expected, $actual ); - } - - public function testI32_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testI32'] - ); - $args = new \ThriftTest\ThriftTest_testI32_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testI32']; - - $this->assertEquals( $expected, $actual ); - } - - public function testI64_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testI64'] - ); - $args = new \ThriftTest\ThriftTest_testI64_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testI64']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testStruct_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testStruct'] - ); - $args = new \ThriftTest\ThriftTest_testStruct_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testStruct']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testNest_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testNest'] - ); - $args = new \ThriftTest\ThriftTest_testNest_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testNest']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testMap_Read() - { - $this->transport->write( - 
TestTJSONProtocol_Fixtures::$testArgsJSON['testMap'] - ); - $args = new \ThriftTest\ThriftTest_testMap_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testMap']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testStringMap_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testStringMap'] - ); - $args = new \ThriftTest\ThriftTest_testStringMap_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testStringMap']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testSet_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testSet'] - ); - $args = new \ThriftTest\ThriftTest_testSet_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testSet']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testList_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testList'] - ); - $args = new \ThriftTest\ThriftTest_testList_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testList']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testEnum_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testEnum'] - ); - $args = new \ThriftTest\ThriftTest_testEnum_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testEnum']; - - $this->assertEquals( $expected, $actual ); - - } - - public function testTypedef_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testTypedef'] - ); - $args = new \ThriftTest\ThriftTest_testTypedef_args(); - $args->read( $this->protocol ); - - $actual = $args->thing; - $expected = Fixtures::$testArgs['testTypedef']; - - $this->assertEquals( 
$expected, $actual ); - } - - public function testMapMap_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testMapMap'] - ); - $result = new \ThriftTest\ThriftTest_testMapMap_result(); - $result->read( $this->protocol ); - - $actual = $result->success; - $expected = Fixtures::$testArgs['testMapMapExpectedResult']; - - $this->assertEquals( $expected, $actual ); - } - - public function testInsanity_Read() - { - $this->transport->write( - TestTJSONProtocol_Fixtures::$testArgsJSON['testInsanity'] - ); - $result = new \ThriftTest\ThriftTest_testInsanity_result(); - $result->read( $this->protocol ); - - $actual = $result->success; - $expected = Fixtures::$testArgs['testInsanityExpectedResult']; - - $this->assertEquals( $expected, $actual ); - } - -} - -class TestTJSONProtocol_Fixtures -{ - public static $testArgsJSON = array(); - - public static function populateTestArgsJSON() - { - self::$testArgsJSON['testVoid'] = '{}'; - - self::$testArgsJSON['testString1'] = '{"1":{"str":"Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, 
\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, 
\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e"}}'; - - self::$testArgsJSON['testString2'] = '{"1":{"str":"quote: \\\\\" backslash: forwardslash-escaped: \\\\\/ backspace: \\\\b formfeed: \f newline: \n return: \r tab: now-all-of-them-together: \"\\\\\\\\\/\\\\b\n\r\t now-a-bunch-of-junk: !@#$%&()(&%$#{}{}<><><"}}'; - - self::$testArgsJSON['testString3'] = '{"1":{"str":"string that ends in double-backslash \\\\\\\\"}}'; - - self::$testArgsJSON['testUnicodeStringWithNonBMP'] = '{"1":{"str":"สวัสดี\/𝒯"}}'; - - self::$testArgsJSON['testDouble'] = '{"1":{"dbl":3.1415926535898}}'; - - self::$testArgsJSON['testByte'] = '{"1":{"i8":1}}'; - - self::$testArgsJSON['testI32'] = '{"1":{"i32":1073741824}}'; - - if (PHP_INT_SIZE == 8) { - self::$testArgsJSON['testI64'] = '{"1":{"i64":'.pow( 2, 60 ).'}}'; - self::$testArgsJSON['testStruct'] = '{"1":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":'.pow( 2, 60 ).'}}}}'; - self::$testArgsJSON['testNest'] = '{"1":{"rec":{"1":{"i8":1},"2":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":'.pow( 2, 60 ).'}}},"3":{"i32":32768}}}}'; - } else { - self::$testArgsJSON['testI64'] = '{"1":{"i64":1152921504606847000}}'; - self::$testArgsJSON['testStruct'] = '{"1":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":1152921504606847000}}}}'; - self::$testArgsJSON['testNest'] = '{"1":{"rec":{"1":{"i8":1},"2":{"rec":{"1":{"str":"worked"},"4":{"i8":1},"9":{"i32":1073741824},"11":{"i64":1152921504606847000}}},"3":{"i32":32768}}}}'; - } 
- - self::$testArgsJSON['testMap'] = '{"1":{"map":["i32","i32",3,{"7":77,"8":88,"9":99}]}}'; - - self::$testArgsJSON['testStringMap'] = '{"1":{"map":["str","str",6,{"a":"123","a b":"with spaces ","same":"same","0":"numeric key","longValue":"Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, 
\u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e","Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, 
\u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, 
Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e":"long key"}]}}'; - - self::$testArgsJSON['testSet'] = '{"1":{"set":["i32",3,1,5,6]}}'; - - self::$testArgsJSON['testList'] = '{"1":{"lst":["i32",3,1,2,3]}}'; - - self::$testArgsJSON['testEnum'] = '{"1":{"i32":1}}'; - - self::$testArgsJSON['testTypedef'] = '{"1":{"i64":69}}'; - - self::$testArgsJSON['testMapMap'] = '{"0":{"map":["i32","map",2,{"4":["i32","i32",4,{"1":1,"2":2,"3":3,"4":4}],"-4":["i32","i32",4,{"-4":-4,"-3":-3,"-2":-2,"-1":-1}]}]}}'; - - self::$testArgsJSON['testInsanity'] = 
'{"0":{"map":["i64","map",2,{"1":["i32","rec",2,{"2":{"1":{"map":["i32","i64",2,{"5":5,"8":8}]},"2":{"lst":["rec",2,{"1":{"str":"Goodbye4"},"4":{"i8":4},"9":{"i32":4},"11":{"i64":4}},{"1":{"str":"Hello2"},"4":{"i8":2},"9":{"i32":2},"11":{"i64":2}}]}},"3":{"1":{"map":["i32","i64",2,{"5":5,"8":8}]},"2":{"lst":["rec",2,{"1":{"str":"Goodbye4"},"4":{"i8":4},"9":{"i32":4},"11":{"i64":4}},{"1":{"str":"Hello2"},"4":{"i8":2},"9":{"i32":2},"11":{"i64":2}}]}}}],"2":["i32","rec",1,{"6":{}}]}]}}'; - - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTSimpleJSONProtocol.php b/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTSimpleJSONProtocol.php deleted file mode 100755 index 973f55cde..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/Protocol/TestTSimpleJSONProtocol.php +++ /dev/null @@ -1,300 +0,0 @@ -registerNamespace('Thrift', __DIR__ . '/../../../../lib'); -$loader->registerNamespace('Test', __DIR__ . '/../../..'); -$loader->registerDefinition('ThriftTest', __DIR__ . 
'/../../../packages'); -$loader->register(); - -/*** - * This test suite depends on running the compiler against the - * standard ThriftTest.thrift file: - * - * lib/php/test$ ../../../compiler/cpp/thrift --gen php -r \ - * --out ./packages ../../../test/ThriftTest.thrift - */ - -class TestTSimpleJSONProtocol extends \PHPUnit_Framework_TestCase -{ - private $transport; - private $protocol; - - public static function setUpBeforeClass() - { - Fixtures::populateTestArgs(); - TestTSimpleJSONProtocol_Fixtures::populateTestArgsSimpleJSON(); - } - - public function setUp() - { - $this->transport = new TMemoryBuffer(); - $this->protocol = new TSimpleJSONProtocol($this->transport); - $this->transport->open(); - } - - /*** - * WRITE TESTS - */ - - public function testVoid_Write() - { - $args = new \ThriftTest\ThriftTest_testVoid_args(); - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testVoid']; - - $this->assertEquals( $expected, $actual ); - } - - public function testString1_Write() - { - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->thing = Fixtures::$testArgs['testString1']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testString1']; - - #$this->assertEquals( $expected, $actual ); - } - - public function testString2_Write() - { - $args = new \ThriftTest\ThriftTest_testString_args(); - $args->thing = Fixtures::$testArgs['testString2']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testString2']; - - $this->assertEquals( $expected, $actual ); - } - - public function testDouble_Write() - { - $args = new \ThriftTest\ThriftTest_testDouble_args(); - $args->thing = Fixtures::$testArgs['testDouble']; - $args->write( $this->protocol ); - - $actual = 
$this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testDouble']; - - $this->assertEquals( $expected, $actual ); - } - - public function testByte_Write() - { - $args = new \ThriftTest\ThriftTest_testByte_args(); - $args->thing = Fixtures::$testArgs['testByte']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testByte']; - - $this->assertEquals( $expected, $actual ); - } - - public function testI32_Write() - { - $args = new \ThriftTest\ThriftTest_testI32_args(); - $args->thing = Fixtures::$testArgs['testI32']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testI32']; - - $this->assertEquals( $expected, $actual ); - } - - public function testI64_Write() - { - $args = new \ThriftTest\ThriftTest_testI64_args(); - $args->thing = Fixtures::$testArgs['testI64']; - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testI64']; - - $this->assertEquals( $expected, $actual ); - } - - public function testStruct_Write() - { - $args = new \ThriftTest\ThriftTest_testStruct_args(); - $args->thing = Fixtures::$testArgs['testStruct']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testStruct']; - - $this->assertEquals( $expected, $actual ); - } - - public function testNest_Write() - { - $args = new \ThriftTest\ThriftTest_testNest_args(); - $args->thing = Fixtures::$testArgs['testNest']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testNest']; - - $this->assertEquals( $expected, $actual ); - } - - public function testMap_Write() - { - $args = new 
\ThriftTest\ThriftTest_testMap_args(); - $args->thing = Fixtures::$testArgs['testMap']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testMap']; - - $this->assertEquals( $expected, $actual ); - } - - public function testStringMap_Write() - { - $args = new \ThriftTest\ThriftTest_testStringMap_args(); - $args->thing = Fixtures::$testArgs['testStringMap']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testStringMap']; - - $this->assertEquals( $expected, $actual ); - } - - public function testSet_Write() - { - $args = new \ThriftTest\ThriftTest_testSet_args(); - $args->thing = Fixtures::$testArgs['testSet']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testSet']; - - $this->assertEquals( $expected, $actual ); - } - - public function testList_Write() - { - $args = new \ThriftTest\ThriftTest_testList_args(); - $args->thing = Fixtures::$testArgs['testList']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testList']; - - $this->assertEquals( $expected, $actual ); - } - - public function testEnum_Write() - { - $args = new \ThriftTest\ThriftTest_testEnum_args(); - $args->thing = Fixtures::$testArgs['testEnum']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testEnum']; - - $this->assertEquals( $expected, $actual ); - } - - public function testTypedef_Write() - { - $args = new \ThriftTest\ThriftTest_testTypedef_args(); - $args->thing = Fixtures::$testArgs['testTypedef']; - - $args->write( $this->protocol ); - - $actual = $this->transport->read( BUFSIZ ); - $expected = 
TestTSimpleJSONProtocol_Fixtures::$testArgsJSON['testTypedef']; - - $this->assertEquals( $expected, $actual ); - } -} - -class TestTSimpleJSONProtocol_Fixtures -{ - public static $testArgsJSON = array(); - - public static function populateTestArgsSimpleJSON() - { - self::$testArgsJSON['testVoid'] = '{}'; - - self::$testArgsJSON['testString1'] = '{"1":{"str":"Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, 
\u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e"}}'; - - self::$testArgsJSON['testString2'] = '{"thing":"quote: \\\\\" backslash: forwardslash-escaped: \\\\\/ backspace: \\\\b formfeed: \f newline: \n return: \r tab: now-all-of-them-together: 
\"\\\\\\\\\/\\\\b\n\r\t now-a-bunch-of-junk: !@#$%&()(&%$#{}{}<><><"}'; - - self::$testArgsJSON['testDouble'] = '{"thing":3.1415926535898}'; - - self::$testArgsJSON['testByte'] = '{"thing":1}'; - - self::$testArgsJSON['testI32'] = '{"thing":1073741824}'; - - if (PHP_INT_SIZE == 8) { - self::$testArgsJSON['testI64'] = '{"thing":'.pow( 2, 60 ).'}'; - self::$testArgsJSON['testStruct'] = '{"thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":'.pow( 2, 60 ).'}}'; - self::$testArgsJSON['testNest'] = '{"thing":{"byte_thing":1,"struct_thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":'.pow( 2, 60 ).'},"i32_thing":32768}}'; - } else { - self::$testArgsJSON['testI64'] = '{"thing":1152921504606847000}'; - - self::$testArgsJSON['testStruct'] = '{"thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":1152921504606847000}}'; - self::$testArgsJSON['testNest'] = '{"thing":{"byte_thing":1,"struct_thing":{"string_thing":"worked","byte_thing":1,"i32_thing":1073741824,"i64_thing":1152921504606847000},"i32_thing":32768}}'; - } - - self::$testArgsJSON['testMap'] = '{"thing":{"7":77,"8":88,"9":99}}'; - - self::$testArgsJSON['testStringMap'] = '{"thing":{"a":"123","a b":"with spaces ","same":"same","0":"numeric key","longValue":"Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, 
\u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, \u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, 
\u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e","Afrikaans, Alemannisch, Aragon\u00e9s, \u0627\u0644\u0639\u0631\u0628\u064a\u0629, \u0645\u0635\u0631\u0649, Asturianu, Aymar aru, Az\u0259rbaycan, \u0411\u0430\u0448\u04a1\u043e\u0440\u0442, Boarisch, \u017demait\u0117\u0161ka, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f, \u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f (\u0442\u0430\u0440\u0430\u0448\u043a\u0435\u0432\u0456\u0446\u0430), \u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438, Bamanankan, \u09ac\u09be\u0982\u09b2\u09be, Brezhoneg, Bosanski, Catal\u00e0, M\u00ecng-d\u0115\u0324ng-ng\u1e73\u0304, \u041d\u043e\u0445\u0447\u0438\u0439\u043d, Cebuano, \u13e3\u13b3\u13a9, \u010cesky, \u0421\u043b\u043e\u0432\u0463\u0301\u043d\u044c\u0441\u043a\u044a \/ \u2c14\u2c0e\u2c11\u2c02\u2c21\u2c10\u2c20\u2c14\u2c0d\u2c1f, \u0427\u04d1\u0432\u0430\u0448\u043b\u0430, Cymraeg, Dansk, Zazaki, \u078b\u07a8\u0788\u07ac\u0780\u07a8\u0784\u07a6\u0790\u07b0, \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac, Emili\u00e0n e rumagn\u00f2l, English, Esperanto, Espa\u00f1ol, Eesti, Euskara, \u0641\u0627\u0631\u0633\u06cc, Suomi, V\u00f5ro, F\u00f8royskt, Fran\u00e7ais, Arpetan, Furlan, Frysk, Gaeilge, \u8d1b\u8a9e, G\u00e0idhlig, Galego, Ava\u00f1e\'\u1ebd, 
\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0, Gaelg, \u05e2\u05d1\u05e8\u05d9\u05ea, \u0939\u093f\u0928\u094d\u0926\u0940, Fiji Hindi, Hrvatski, Krey\u00f2l ayisyen, Magyar, \u0540\u0561\u0575\u0565\u0580\u0565\u0576, Interlingua, Bahasa Indonesia, Ilokano, Ido, \u00cdslenska, Italiano, \u65e5\u672c\u8a9e, Lojban, Basa Jawa, \u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8, Kongo, Kalaallisut, \u0c95\u0ca8\u0ccd\u0ca8\u0ca1, \ud55c\uad6d\uc5b4, \u041a\u044a\u0430\u0440\u0430\u0447\u0430\u0439-\u041c\u0430\u043b\u043a\u044a\u0430\u0440, Ripoarisch, Kurd\u00ee, \u041a\u043e\u043c\u0438, Kernewek, \u041a\u044b\u0440\u0433\u044b\u0437\u0447\u0430, Latina, Ladino, L\u00ebtzebuergesch, Limburgs, Ling\u00e1la, \u0ea5\u0eb2\u0ea7, Lietuvi\u0173, Latvie\u0161u, Basa Banyumasan, Malagasy, \u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438, \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02, \u092e\u0930\u093e\u0920\u0940, Bahasa Melayu, \u0645\u0627\u0632\u0650\u0631\u0648\u0646\u06cc, Nnapulitano, Nedersaksisch, \u0928\u0947\u092a\u093e\u0932 \u092d\u093e\u0937\u093e, Nederlands, \u202aNorsk (nynorsk)\u202c, \u202aNorsk (bokm\u00e5l)\u202c, Nouormand, Din\u00e9 bizaad, Occitan, \u0418\u0440\u043e\u043d\u0430\u0443, Papiamentu, Deitsch, Norfuk \/ Pitkern, Polski, \u067e\u0646\u062c\u0627\u0628\u06cc, \u067e\u069a\u062a\u0648, Portugu\u00eas, Runa Simi, Rumantsch, Romani, Rom\u00e2n\u0103, \u0420\u0443\u0441\u0441\u043a\u0438\u0439, \u0421\u0430\u0445\u0430 \u0442\u044b\u043b\u0430, Sardu, Sicilianu, Scots, S\u00e1megiella, Simple English, Sloven\u010dina, Sloven\u0161\u010dina, \u0421\u0440\u043f\u0441\u043a\u0438 \/ Srpski, Seeltersk, Svenska, Kiswahili, \u0ba4\u0bae\u0bbf\u0bb4\u0bcd, \u0c24\u0c46\u0c32\u0c41\u0c17\u0c41, \u0422\u043e\u04b7\u0438\u043a\u04e3, \u0e44\u0e17\u0e22, T\u00fcrkmen\u00e7e, Tagalog, T\u00fcrk\u00e7e, \u0422\u0430\u0442\u0430\u0440\u0447\u0430\/Tatar\u00e7a, \u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430, \u0627\u0631\u062f\u0648, Ti\u1ebfng 
Vi\u1ec7t, Volap\u00fck, Walon, Winaray, \u5434\u8bed, isiXhosa, \u05d9\u05d9\u05b4\u05d3\u05d9\u05e9, Yor\u00f9b\u00e1, Ze\u00eauws, \u4e2d\u6587, B\u00e2n-l\u00e2m-g\u00fa, \u7cb5\u8a9e":"long key"}}'; - - self::$testArgsJSON['testSet'] = '{"thing":[1,5,6]}'; - - self::$testArgsJSON['testList'] = '{"thing":[1,2,3]}'; - - self::$testArgsJSON['testEnum'] = '{"thing":1}'; - - self::$testArgsJSON['testTypedef'] = '{"thing":69}'; - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/TestValidators.php b/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/TestValidators.php deleted file mode 100644 index 36cf00099..000000000 --- a/vendor/git.apache.org/thrift.git/lib/php/test/Test/Thrift/TestValidators.php +++ /dev/null @@ -1,156 +0,0 @@ -registerNamespace('Thrift', __DIR__ . '/../../../lib'); -$loader->registerDefinition('ThriftTest', __DIR__ . '/../../packages/' . $GEN_DIR); -$loader->registerDefinition('TestValidators', __DIR__ . '/../../packages/' . $GEN_DIR); -$loader->register(); - -// Would be nice to have PHPUnit here, but for now just hack it. - -set_exception_handler(function ($e) { - my_assert(false, "Unexpected exception caught: " . $e->getMessage()); -}); - -set_error_handler(function ($errno, $errmsg) { - my_assert(false, "Unexpected PHP error: " . 
$errmsg); -}); - -// Empty structs should not have validators -assert_has_no_read_validator('ThriftTest\EmptyStruct'); -assert_has_no_write_validator('ThriftTest\EmptyStruct'); - -// Bonk has only opt_in_req_out fields -{ - assert_has_no_read_validator('ThriftTest\Bonk'); - assert_has_a_write_validator('ThriftTest\Bonk'); - { - // Check that we can read an empty object - $bonk = new \ThriftTest\Bonk(); - $transport = new TMemoryBuffer("\000"); - $protocol = new TBinaryProtocol($transport); - $bonk->read($protocol); - } - { - // ...but not write an empty object - $bonk = new \ThriftTest\Bonk(); - $transport = new TMemoryBuffer(); - $protocol = new TBinaryProtocol($transport); - assert_protocol_exception_thrown(function () use ($bonk, $protocol) { $bonk->write($protocol); }, - 'Bonk was able to write an empty object'); - } -} - -// StructA has a single required field -{ - assert_has_a_read_validator('ThriftTest\StructA'); - assert_has_a_write_validator('ThriftTest\StructA'); - { - // Check that we are not able to write StructA with a missing required field - $structa = new \ThriftTest\StructA(); - $transport = new TMemoryBuffer(); - $protocol = new TBinaryProtocol($transport); - assert_protocol_exception_thrown(function () use ($structa, $protocol) { $structa->write($protocol); }, - 'StructA was able to write an empty object'); - } - { - // Check that we are able to read and write a message with a good StructA - $transport = new TMemoryBuffer(base64_decode('CwABAAAAA2FiYwA=')); - $protocol = new TBinaryProtocol($transport); - $structa = new \ThriftTest\StructA(); - $structa->read($protocol); - $structa->write($protocol); - } -} - -// Unions should not get write validators -assert_has_no_write_validator('TestValidators\UnionOfStrings'); - -// Service _result classes should not get any validators -assert_has_no_read_validator('TestValidators\TestService_test_result'); -assert_has_no_write_validator('TestValidators\TestService_test_result'); - -function 
assert_has_a_read_validator($class) -{ - my_assert(has_read_validator_method($class), - $class . ' class should have a read validator'); -} - -function assert_has_no_read_validator($class) -{ - my_assert(!has_read_validator_method($class), - $class . ' class should not have a read validator'); -} - -function assert_has_a_write_validator($class) -{ - my_assert(has_write_validator_method($class), - $class . ' class should have a write validator'); -} - -function assert_has_no_write_validator($class) -{ - my_assert(!has_write_validator_method($class), - $class . ' class should not have a write validator'); -} - -function assert_protocol_exception_thrown($callable, $message) -{ - try { - call_user_func($callable); - my_assert(false, $message); - } catch (TProtocolException $e) { - } -} - -function has_write_validator_method($class) -{ - $rc = new \ReflectionClass($class); - - return $rc->hasMethod('_validateForWrite'); -} - -function has_read_validator_method($class) -{ - $rc = new \ReflectionClass($class); - - return $rc->hasMethod('_validateForRead'); -} - -function my_assert($something, $message) -{ - if (!$something) { - fwrite(STDERR, basename(__FILE__) . 
" FAILED: $message\n"); - exit(1); - } -} diff --git a/vendor/git.apache.org/thrift.git/lib/php/test/Validator/BaseValidatorTest.php b/vendor/git.apache.org/thrift.git/lib/php/test/Validator/BaseValidatorTest.php new file mode 100644 index 000000000..60290830e --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Validator/BaseValidatorTest.php @@ -0,0 +1,154 @@ +assertNoReadValidator('ThriftTest\EmptyStruct'); + $this->assertNoWriteValidator('ThriftTest\EmptyStruct'); + } + + public function testBonkValidator() + { + $this->assertNoReadValidator('ThriftTest\Bonk'); + $this->assertHasWriteValidator('ThriftTest\Bonk'); + } + + public function testStructAValidator() + { + $this->assertHasReadValidator('ThriftTest\StructA'); + $this->assertHasWriteValidator('ThriftTest\StructA'); + } + + public function testUnionOfStringsValidator() + { + $this->assertNoWriteValidator('TestValidators\UnionOfStrings'); + } + + public function testServiceResultValidator() + { + $this->assertNoReadValidator('TestValidators\TestService_test_result'); + $this->assertNoWriteValidator('TestValidators\TestService_test_result'); + } + + public function testReadEmpty() + { + $bonk = new \ThriftTest\Bonk(); + $transport = new TMemoryBuffer("\000"); + $protocol = new TBinaryProtocol($transport); + $bonk->read($protocol); + } + + public function testWriteEmpty() + { + $bonk = new \ThriftTest\Bonk(); + $transport = new TMemoryBuffer(); + $protocol = new TBinaryProtocol($transport); + try { + $bonk->write($protocol); + $this->fail('Bonk was able to write an empty object'); + } catch (TProtocolException $e) { + } + } + + public function testWriteWithMissingRequired() + { + // Check that we are not able to write StructA with a missing required field + $structa = new \ThriftTest\StructA(); + $transport = new TMemoryBuffer(); + $protocol = new TBinaryProtocol($transport); + + try { + $structa->write($protocol); + $this->fail('StructA was able to write an empty object'); + } catch 
(TProtocolException $e) { + } + } + + public function testReadStructA() + { + $transport = new TMemoryBuffer(base64_decode('CwABAAAAA2FiYwA=')); + $protocol = new TBinaryProtocol($transport); + $structa = new \ThriftTest\StructA(); + $structa->read($protocol); + $this->assertEquals("abc", $structa->s); + } + + public function testWriteStructA() + { + $transport = new TMemoryBuffer(); + $protocol = new TBinaryProtocol($transport); + $structa = new \ThriftTest\StructA(); + $structa->s = "abc"; + $structa->write($protocol); + $writeResult = base64_encode($transport->getBuffer()); + $this->assertEquals('CwABAAAAA2FiYwA=', $writeResult); + } + + protected static function assertHasReadValidator($class) + { + if (!static::hasReadValidator($class)) { + static::fail($class . ' class should have a read validator'); + } + } + + protected static function assertNoReadValidator($class) + { + if (static::hasReadValidator($class)) { + static::fail($class . ' class should not have a write validator'); + } + } + + protected static function assertHasWriteValidator($class) + { + if (!static::hasWriteValidator($class)) { + static::fail($class . ' class should have a write validator'); + } + } + + protected static function assertNoWriteValidator($class) + { + if (static::hasWriteValidator($class)) { + static::fail($class . 
' class should not have a write validator'); + } + } + + private static function hasReadValidator($class) + { + $rc = new \ReflectionClass($class); + + return $rc->hasMethod('_validateForRead'); + } + + private static function hasWriteValidator($class) + { + $rc = new \ReflectionClass($class); + + return $rc->hasMethod('_validateForWrite'); + } +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor.go b/vendor/git.apache.org/thrift.git/lib/php/test/Validator/ValidatorTest.php similarity index 62% rename from vendor/git.apache.org/thrift.git/lib/go/thrift/processor.go rename to vendor/git.apache.org/thrift.git/lib/php/test/Validator/ValidatorTest.php index 566aaaf71..fa6c7a9f7 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor.go +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Validator/ValidatorTest.php @@ -1,5 +1,4 @@ -// +build !go1.7 - +addPsr4('', __DIR__ . '/../packages/phpv'); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_go17.go b/vendor/git.apache.org/thrift.git/lib/php/test/Validator/ValidatorTestOop.php similarity index 61% rename from vendor/git.apache.org/thrift.git/lib/go/thrift/processor_go17.go rename to vendor/git.apache.org/thrift.git/lib/php/test/Validator/ValidatorTestOop.php index fb0b165dc..93bca4d0c 100644 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_go17.go +++ b/vendor/git.apache.org/thrift.git/lib/php/test/Validator/ValidatorTestOop.php @@ -1,5 +1,4 @@ -// +build go1.7 - +addPsr4('', __DIR__ . 
'/../packages/phpvo'); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/py/setup.py b/vendor/git.apache.org/thrift.git/lib/py/setup.py index e3435c775..4c2877abc 100644 --- a/vendor/git.apache.org/thrift.git/lib/py/setup.py +++ b/vendor/git.apache.org/thrift.git/lib/py/setup.py @@ -31,7 +31,10 @@ from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatfo # Fix to build sdist under vagrant import os if 'vagrant' in str(os.environ): - del os.link + try: + del os.link + except AttributeError: + pass include_dirs = ['src'] if sys.platform == 'win32': @@ -87,7 +90,7 @@ def run_setup(with_binary): twisted_deps = ['twisted'] setup(name='thrift', - version='0.11.0', + version='1.0.0-dev', description='Python bindings for the Apache Thrift RPC system', author='Thrift Developers', author_email='dev@thrift.apache.org', diff --git a/vendor/git.apache.org/thrift.git/lib/py/src/server/TNonblockingServer.py b/vendor/git.apache.org/thrift.git/lib/py/src/server/TNonblockingServer.py index 26c0f7e1f..f62d486eb 100644 --- a/vendor/git.apache.org/thrift.git/lib/py/src/server/TNonblockingServer.py +++ b/vendor/git.apache.org/thrift.git/lib/py/src/server/TNonblockingServer.py @@ -174,7 +174,7 @@ class Connection(object): self._wbuf = b'' self.len = 0 else: - self._wbuf = self.message[sent:] + self._wbuf = self._wbuf[sent:] @locked def ready(self, all_ok, message): diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/base_protocol.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/base_protocol.rb index 88f44d46d..5c693e99f 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/base_protocol.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/base_protocol.rb @@ -369,11 +369,19 @@ module Thrift read_list_end end end + + def to_s + "#{trans.to_s}" + end end class BaseProtocolFactory def get_protocol(trans) raise NotImplementedError end + + def to_s + "base" + end end -end \ No newline at end 
of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol.rb index e70b1e3a0..d8279dbe6 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol.rb @@ -226,12 +226,19 @@ module Thrift size = read_i32 trans.read_all(size) end - + + def to_s + "binary(#{super.to_s})" + end end class BinaryProtocolFactory < BaseProtocolFactory def get_protocol(trans) return Thrift::BinaryProtocol.new(trans) end + + def to_s + "binary" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol_accelerated.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol_accelerated.rb index 70ea652c8..09b02644d 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol_accelerated.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/binary_protocol_accelerated.rb @@ -35,5 +35,13 @@ module Thrift BinaryProtocol.new(trans) end end + + def to_s + if (defined? 
BinaryProtocolAccelerated) + "binary-accel" + else + "binary" + end + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/compact_protocol.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/compact_protocol.rb index 605eea67f..1f9bd3060 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/compact_protocol.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/compact_protocol.rb @@ -345,6 +345,10 @@ module Thrift size = read_varint32() trans.read_all(size) end + + def to_s + "compact(#{super.to_s})" + end private @@ -431,5 +435,9 @@ module Thrift def get_protocol(trans) CompactProtocol.new(trans) end + + def to_s + "compact" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/json_protocol.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/json_protocol.rb index 514bdbf6f..91e74e46b 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/json_protocol.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/json_protocol.rb @@ -768,11 +768,19 @@ module Thrift def read_binary read_json_base64 end + + def to_s + "json(#{super.to_s})" + end end class JsonProtocolFactory < BaseProtocolFactory def get_protocol(trans) return Thrift::JsonProtocol.new(trans) end + + def to_s + "json" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/multiplexed_protocol.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/multiplexed_protocol.rb index 13c9d93e1..b4428a734 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/multiplexed_protocol.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/protocol/multiplexed_protocol.rb @@ -36,5 +36,9 @@ module Thrift @protocol.write_message_begin(name, type, seqid) end end + + def to_s + "multiplexed(#{@service_name=@protocol.to_s})" + end end -end \ No newline at end of file +end diff --git 
a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/base_server.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/base_server.rb index 1ee121333..aa4d09ce4 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/base_server.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/base_server.rb @@ -26,6 +26,12 @@ module Thrift @protocol_factory = protocol_factory ? protocol_factory : Thrift::BinaryProtocolFactory.new end - def serve; nil; end + def serve + raise NotImplementedError + end + + def to_s + "server(#{@protocol_factory.to_s}(#{@transport_factory.to_s}(#{@server_transport.to_s})))" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/simple_server.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/simple_server.rb index 21e865926..905fe9bd8 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/simple_server.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/simple_server.rb @@ -39,5 +39,9 @@ module Thrift @server_transport.close end end + + def to_s + "simple(#{super.to_s})" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/thread_pool_server.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/thread_pool_server.rb index 8cec805a9..bb754ad2b 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/thread_pool_server.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/thread_pool_server.rb @@ -71,5 +71,9 @@ module Thrift @server_transport.close end end + + def to_s + "threadpool(#{super.to_s})" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/threaded_server.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/threaded_server.rb index a2c917cb8..88ee1833f 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/threaded_server.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/server/threaded_server.rb @@ -43,5 +43,9 @@ module Thrift @server_transport.close end end + + def to_s + "threaded(#{super.to_s})" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_server_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_server_transport.rb index 68c5af076..0105463f8 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_server_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_server_transport.rb @@ -34,4 +34,4 @@ module Thrift raise NotImplementedError end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_transport.rb index 879032644..97e59352a 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/base_transport.rb @@ -99,11 +99,19 @@ module Thrift alias_method :<<, :write def flush; end + + def to_s + "base" + end end class BaseTransportFactory def get_transport(trans) return trans end + + def to_s + "base" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/buffered_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/buffered_transport.rb index 781d3c69c..4fe9c41a5 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/buffered_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/buffered_transport.rb @@ -104,11 +104,19 @@ module Thrift @transport.flush end + + def to_s + "buffered(#{@transport.to_s})" + end end class BufferedTransportFactory < BaseTransportFactory def 
get_transport(transport) return BufferedTransport.new(transport) end + + def to_s + "buffered" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/framed_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/framed_transport.rb index d806ce022..953177821 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/framed_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/framed_transport.rb @@ -99,6 +99,10 @@ module Thrift @wbuf = Bytes.empty_byte_buffer end + def to_s + "framed(#{@transport.to_s})" + end + private def read_frame @@ -113,5 +117,9 @@ module Thrift def get_transport(transport) return FramedTransport.new(transport) end + + def to_s + "framed" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/http_client_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/http_client_transport.rb index c9c4fec8d..5c1dd5c8a 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/http_client_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/http_client_transport.rb @@ -53,5 +53,9 @@ module Thrift ensure @outbuf = Bytes.empty_byte_buffer end + + def to_s + "@{self.url}" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/io_stream_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/io_stream_transport.rb index e3c8379da..ccec68f25 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/io_stream_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/io_stream_transport.rb @@ -35,5 +35,8 @@ module Thrift def write(buf); @output.write(Bytes.force_binary_encoding(buf)) end def close; @input.close; @output.close end def to_io; @input end # we're assuming this is used in a IO.select for reading + def 
to_s + "iostream(input=#{@input.to_s},output=#{@output.to_s})" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/memory_buffer_transport.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/memory_buffer_transport.rb index ad5ad8555..469ea7396 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/memory_buffer_transport.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/memory_buffer_transport.rb @@ -121,5 +121,9 @@ module Thrift end out.join(" ") end + + def to_s + "memory" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/server_socket.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/server_socket.rb index 7feb9ab0d..50002324e 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/server_socket.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/server_socket.rb @@ -59,5 +59,10 @@ module Thrift end alias to_io handle + + def to_s + "socket(#{@host}:#{@port})" + end + end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/socket.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/socket.rb index 517d112aa..f5e6f3b85 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/socket.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/socket.rb @@ -134,8 +134,10 @@ module Thrift @handle = nil end - def to_io - @handle + alias to_io handle + + def to_s + "socket(#{@host}:#{@port})" end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_server_socket.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_server_socket.rb index abc134390..3abd5ec3d 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_server_socket.rb +++ 
b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_server_socket.rb @@ -33,5 +33,9 @@ module Thrift socket = TCPServer.new(@host, @port) @handle = OpenSSL::SSL::SSLServer.new(socket, @ssl_context) end + + def to_s + "ssl(#{super.to_s})" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_socket.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_socket.rb index dbbcc94fa..7ab96ab45 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_socket.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/ssl_socket.rb @@ -43,5 +43,9 @@ module Thrift raise TransportException.new(TransportException::NOT_OPEN, "Could not connect to #{@desc}: #{e}") end end + + def to_s + "ssl(#{super.to_s})" + end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_server_socket.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_server_socket.rb index a135d25f2..057d122e7 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_server_socket.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_server_socket.rb @@ -56,5 +56,9 @@ module Thrift end alias to_io handle + + def to_s + "domain(#{@path})" + end end -end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_socket.rb b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_socket.rb index 8f692e4c8..5dffd59f2 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_socket.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/lib/thrift/transport/unix_socket.rb @@ -36,5 +36,9 @@ module Thrift raise TransportException.new(TransportException::NOT_OPEN, "Could not open UNIX socket at #{@path}") end end + + def to_s + "domain(#{@path})" + end end -end \ No newline at end of file +end diff --git 
a/vendor/git.apache.org/thrift.git/lib/rb/spec/base_protocol_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/base_protocol_spec.rb index ec50c4823..eca936b23 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/base_protocol_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/base_protocol_spec.rb @@ -22,41 +22,46 @@ require 'spec_helper' describe 'BaseProtocol' do before(:each) do - @trans = mock("MockTransport") + @trans = double("MockTransport") @prot = Thrift::BaseProtocol.new(@trans) end describe Thrift::BaseProtocol do # most of the methods are stubs, so we can ignore them + it "should provide a reasonable to_s" do + expect(@trans).to receive(:to_s).once.and_return("trans") + expect(@prot.to_s).to eq("trans") + end + it "should make trans accessible" do - @prot.trans.should eql(@trans) + expect(@prot.trans).to eql(@trans) end it 'should write out a field nicely (deprecated write_field signature)' do - @prot.should_receive(:write_field_begin).with('field', 'type', 'fid').ordered - @prot.should_receive(:write_type).with({:name => 'field', :type => 'type'}, 'value').ordered - @prot.should_receive(:write_field_end).ordered + expect(@prot).to receive(:write_field_begin).with('field', 'type', 'fid').ordered + expect(@prot).to receive(:write_type).with({:name => 'field', :type => 'type'}, 'value').ordered + expect(@prot).to receive(:write_field_end).ordered @prot.write_field('field', 'type', 'fid', 'value') end it 'should write out a field nicely' do - @prot.should_receive(:write_field_begin).with('field', 'type', 'fid').ordered - @prot.should_receive(:write_type).with({:name => 'field', :type => 'type', :binary => false}, 'value').ordered - @prot.should_receive(:write_field_end).ordered + expect(@prot).to receive(:write_field_begin).with('field', 'type', 'fid').ordered + expect(@prot).to receive(:write_type).with({:name => 'field', :type => 'type', :binary => false}, 'value').ordered + expect(@prot).to receive(:write_field_end).ordered 
@prot.write_field({:name => 'field', :type => 'type', :binary => false}, 'fid', 'value') end it 'should write out the different types (deprecated write_type signature)' do - @prot.should_receive(:write_bool).with('bool').ordered - @prot.should_receive(:write_byte).with('byte').ordered - @prot.should_receive(:write_double).with('double').ordered - @prot.should_receive(:write_i16).with('i16').ordered - @prot.should_receive(:write_i32).with('i32').ordered - @prot.should_receive(:write_i64).with('i64').ordered - @prot.should_receive(:write_string).with('string').ordered - struct = mock('Struct') - struct.should_receive(:write).with(@prot).ordered + expect(@prot).to receive(:write_bool).with('bool').ordered + expect(@prot).to receive(:write_byte).with('byte').ordered + expect(@prot).to receive(:write_double).with('double').ordered + expect(@prot).to receive(:write_i16).with('i16').ordered + expect(@prot).to receive(:write_i32).with('i32').ordered + expect(@prot).to receive(:write_i64).with('i64').ordered + expect(@prot).to receive(:write_string).with('string').ordered + struct = double('Struct') + expect(struct).to receive(:write).with(@prot).ordered @prot.write_type(Thrift::Types::BOOL, 'bool') @prot.write_type(Thrift::Types::BYTE, 'byte') @prot.write_type(Thrift::Types::DOUBLE, 'double') @@ -72,16 +77,16 @@ describe 'BaseProtocol' do end it 'should write out the different types' do - @prot.should_receive(:write_bool).with('bool').ordered - @prot.should_receive(:write_byte).with('byte').ordered - @prot.should_receive(:write_double).with('double').ordered - @prot.should_receive(:write_i16).with('i16').ordered - @prot.should_receive(:write_i32).with('i32').ordered - @prot.should_receive(:write_i64).with('i64').ordered - @prot.should_receive(:write_string).with('string').ordered - @prot.should_receive(:write_binary).with('binary').ordered - struct = mock('Struct') - struct.should_receive(:write).with(@prot).ordered + expect(@prot).to 
receive(:write_bool).with('bool').ordered + expect(@prot).to receive(:write_byte).with('byte').ordered + expect(@prot).to receive(:write_double).with('double').ordered + expect(@prot).to receive(:write_i16).with('i16').ordered + expect(@prot).to receive(:write_i32).with('i32').ordered + expect(@prot).to receive(:write_i64).with('i64').ordered + expect(@prot).to receive(:write_string).with('string').ordered + expect(@prot).to receive(:write_binary).with('binary').ordered + struct = double('Struct') + expect(struct).to receive(:write).with(@prot).ordered @prot.write_type({:type => Thrift::Types::BOOL}, 'bool') @prot.write_type({:type => Thrift::Types::BYTE}, 'byte') @prot.write_type({:type => Thrift::Types::DOUBLE}, 'double') @@ -98,13 +103,13 @@ describe 'BaseProtocol' do end it 'should read the different types (deprecated read_type signature)' do - @prot.should_receive(:read_bool).ordered - @prot.should_receive(:read_byte).ordered - @prot.should_receive(:read_i16).ordered - @prot.should_receive(:read_i32).ordered - @prot.should_receive(:read_i64).ordered - @prot.should_receive(:read_double).ordered - @prot.should_receive(:read_string).ordered + expect(@prot).to receive(:read_bool).ordered + expect(@prot).to receive(:read_byte).ordered + expect(@prot).to receive(:read_i16).ordered + expect(@prot).to receive(:read_i32).ordered + expect(@prot).to receive(:read_i64).ordered + expect(@prot).to receive(:read_double).ordered + expect(@prot).to receive(:read_string).ordered @prot.read_type(Thrift::Types::BOOL) @prot.read_type(Thrift::Types::BYTE) @prot.read_type(Thrift::Types::I16) @@ -120,14 +125,14 @@ describe 'BaseProtocol' do end it 'should read the different types' do - @prot.should_receive(:read_bool).ordered - @prot.should_receive(:read_byte).ordered - @prot.should_receive(:read_i16).ordered - @prot.should_receive(:read_i32).ordered - @prot.should_receive(:read_i64).ordered - @prot.should_receive(:read_double).ordered - @prot.should_receive(:read_string).ordered - 
@prot.should_receive(:read_binary).ordered + expect(@prot).to receive(:read_bool).ordered + expect(@prot).to receive(:read_byte).ordered + expect(@prot).to receive(:read_i16).ordered + expect(@prot).to receive(:read_i32).ordered + expect(@prot).to receive(:read_i64).ordered + expect(@prot).to receive(:read_double).ordered + expect(@prot).to receive(:read_string).ordered + expect(@prot).to receive(:read_binary).ordered @prot.read_type({:type => Thrift::Types::BOOL}) @prot.read_type({:type => Thrift::Types::BYTE}) @prot.read_type({:type => Thrift::Types::I16}) @@ -144,13 +149,13 @@ describe 'BaseProtocol' do end it "should skip the basic types" do - @prot.should_receive(:read_bool).ordered - @prot.should_receive(:read_byte).ordered - @prot.should_receive(:read_i16).ordered - @prot.should_receive(:read_i32).ordered - @prot.should_receive(:read_i64).ordered - @prot.should_receive(:read_double).ordered - @prot.should_receive(:read_string).ordered + expect(@prot).to receive(:read_bool).ordered + expect(@prot).to receive(:read_byte).ordered + expect(@prot).to receive(:read_i16).ordered + expect(@prot).to receive(:read_i32).ordered + expect(@prot).to receive(:read_i64).ordered + expect(@prot).to receive(:read_double).ordered + expect(@prot).to receive(:read_string).ordered @prot.skip(Thrift::Types::BOOL) @prot.skip(Thrift::Types::BYTE) @prot.skip(Thrift::Types::I16) @@ -163,47 +168,47 @@ describe 'BaseProtocol' do it "should skip structs" do real_skip = @prot.method(:skip) - @prot.should_receive(:read_struct_begin).ordered - @prot.should_receive(:read_field_begin).exactly(4).times.and_return( + expect(@prot).to receive(:read_struct_begin).ordered + expect(@prot).to receive(:read_field_begin).exactly(4).times.and_return( ['field 1', Thrift::Types::STRING, 1], ['field 2', Thrift::Types::I32, 2], ['field 3', Thrift::Types::MAP, 3], [nil, Thrift::Types::STOP, 0] ) - @prot.should_receive(:read_field_end).exactly(3).times - @prot.should_receive(:read_string).exactly(3).times - 
@prot.should_receive(:read_i32).ordered - @prot.should_receive(:read_map_begin).ordered.and_return([Thrift::Types::STRING, Thrift::Types::STRING, 1]) + expect(@prot).to receive(:read_field_end).exactly(3).times + expect(@prot).to receive(:read_string).exactly(3).times + expect(@prot).to receive(:read_i32).ordered + expect(@prot).to receive(:read_map_begin).ordered.and_return([Thrift::Types::STRING, Thrift::Types::STRING, 1]) # @prot.should_receive(:read_string).exactly(2).times - @prot.should_receive(:read_map_end).ordered - @prot.should_receive(:read_struct_end).ordered + expect(@prot).to receive(:read_map_end).ordered + expect(@prot).to receive(:read_struct_end).ordered real_skip.call(Thrift::Types::STRUCT) end it "should skip maps" do real_skip = @prot.method(:skip) - @prot.should_receive(:read_map_begin).ordered.and_return([Thrift::Types::STRING, Thrift::Types::STRUCT, 1]) - @prot.should_receive(:read_string).ordered - @prot.should_receive(:read_struct_begin).ordered.and_return(["some_struct"]) - @prot.should_receive(:read_field_begin).ordered.and_return([nil, Thrift::Types::STOP, nil]); - @prot.should_receive(:read_struct_end).ordered - @prot.should_receive(:read_map_end).ordered + expect(@prot).to receive(:read_map_begin).ordered.and_return([Thrift::Types::STRING, Thrift::Types::STRUCT, 1]) + expect(@prot).to receive(:read_string).ordered + expect(@prot).to receive(:read_struct_begin).ordered.and_return(["some_struct"]) + expect(@prot).to receive(:read_field_begin).ordered.and_return([nil, Thrift::Types::STOP, nil]); + expect(@prot).to receive(:read_struct_end).ordered + expect(@prot).to receive(:read_map_end).ordered real_skip.call(Thrift::Types::MAP) end it "should skip sets" do real_skip = @prot.method(:skip) - @prot.should_receive(:read_set_begin).ordered.and_return([Thrift::Types::I64, 9]) - @prot.should_receive(:read_i64).ordered.exactly(9).times - @prot.should_receive(:read_set_end) + expect(@prot).to 
receive(:read_set_begin).ordered.and_return([Thrift::Types::I64, 9]) + expect(@prot).to receive(:read_i64).ordered.exactly(9).times + expect(@prot).to receive(:read_set_end) real_skip.call(Thrift::Types::SET) end it "should skip lists" do real_skip = @prot.method(:skip) - @prot.should_receive(:read_list_begin).ordered.and_return([Thrift::Types::DOUBLE, 11]) - @prot.should_receive(:read_double).ordered.exactly(11).times - @prot.should_receive(:read_list_end) + expect(@prot).to receive(:read_list_begin).ordered.and_return([Thrift::Types::DOUBLE, 11]) + expect(@prot).to receive(:read_double).ordered.exactly(11).times + expect(@prot).to receive(:read_list_end) real_skip.call(Thrift::Types::LIST) end end @@ -211,7 +216,11 @@ describe 'BaseProtocol' do describe Thrift::BaseProtocolFactory do it "should raise NotImplementedError" do # returning nil since Protocol is just an abstract class - lambda {Thrift::BaseProtocolFactory.new.get_protocol(mock("MockTransport"))}.should raise_error(NotImplementedError) + expect {Thrift::BaseProtocolFactory.new.get_protocol(double("MockTransport"))}.to raise_error(NotImplementedError) + end + + it "should provide a reasonable to_s" do + expect(Thrift::BaseProtocolFactory.new.to_s).to eq("base") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/base_transport_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/base_transport_spec.rb index 4196572da..d2f60aaea 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/base_transport_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/base_transport_spec.rb @@ -24,105 +24,119 @@ describe 'BaseTransport' do describe Thrift::TransportException do it "should make type accessible" do exc = Thrift::TransportException.new(Thrift::TransportException::ALREADY_OPEN, "msg") - exc.type.should == Thrift::TransportException::ALREADY_OPEN - exc.message.should == "msg" + expect(exc.type).to eq(Thrift::TransportException::ALREADY_OPEN) + expect(exc.message).to eq("msg") end 
end describe Thrift::BaseTransport do it "should read the specified size" do transport = Thrift::BaseTransport.new - transport.should_receive(:read).with(40).ordered.and_return("10 letters") - transport.should_receive(:read).with(30).ordered.and_return("fifteen letters") - transport.should_receive(:read).with(15).ordered.and_return("more characters") - transport.read_all(40).should == "10 lettersfifteen lettersmore characters" + expect(transport).to receive(:read).with(40).ordered.and_return("10 letters") + expect(transport).to receive(:read).with(30).ordered.and_return("fifteen letters") + expect(transport).to receive(:read).with(15).ordered.and_return("more characters") + expect(transport.read_all(40)).to eq("10 lettersfifteen lettersmore characters") end it "should stub out the rest of the methods" do # can't test for stubbiness, so just make sure they're defined [:open?, :open, :close, :read, :write, :flush].each do |sym| - Thrift::BaseTransport.method_defined?(sym).should be_true + expect(Thrift::BaseTransport.method_defined?(sym)).to be_truthy end end it "should alias << to write" do - Thrift::BaseTransport.instance_method(:<<).should == Thrift::BaseTransport.instance_method(:write) + expect(Thrift::BaseTransport.instance_method(:<<)).to eq(Thrift::BaseTransport.instance_method(:write)) + end + + it "should provide a reasonable to_s" do + expect(Thrift::BaseTransport.new.to_s).to eq("base") end end describe Thrift::BaseServerTransport do it "should stub out its methods" do [:listen, :accept, :close].each do |sym| - Thrift::BaseServerTransport.method_defined?(sym).should be_true + expect(Thrift::BaseServerTransport.method_defined?(sym)).to be_truthy end end end describe Thrift::BaseTransportFactory do it "should return the transport it's given" do - transport = mock("Transport") - Thrift::BaseTransportFactory.new.get_transport(transport).should eql(transport) + transport = double("Transport") + 
expect(Thrift::BaseTransportFactory.new.get_transport(transport)).to eql(transport) + end + + it "should provide a reasonable to_s" do + expect(Thrift::BaseTransportFactory.new.to_s).to eq("base") end end describe Thrift::BufferedTransport do + it "should provide a to_s that describes the encapsulation" do + trans = double("Transport") + expect(trans).to receive(:to_s).and_return("mock") + expect(Thrift::BufferedTransport.new(trans).to_s).to eq("buffered(mock)") + end + it "should pass through everything but write/flush/read" do - trans = mock("Transport") - trans.should_receive(:open?).ordered.and_return("+ open?") - trans.should_receive(:open).ordered.and_return("+ open") - trans.should_receive(:flush).ordered # from the close - trans.should_receive(:close).ordered.and_return("+ close") + trans = double("Transport") + expect(trans).to receive(:open?).ordered.and_return("+ open?") + expect(trans).to receive(:open).ordered.and_return("+ open") + expect(trans).to receive(:flush).ordered # from the close + expect(trans).to receive(:close).ordered.and_return("+ close") btrans = Thrift::BufferedTransport.new(trans) - btrans.open?.should == "+ open?" 
- btrans.open.should == "+ open" - btrans.close.should == "+ close" + expect(btrans.open?).to eq("+ open?") + expect(btrans.open).to eq("+ open") + expect(btrans.close).to eq("+ close") end it "should buffer reads in chunks of #{Thrift::BufferedTransport::DEFAULT_BUFFER}" do - trans = mock("Transport") - trans.should_receive(:read).with(Thrift::BufferedTransport::DEFAULT_BUFFER).and_return("lorum ipsum dolor emet") + trans = double("Transport") + expect(trans).to receive(:read).with(Thrift::BufferedTransport::DEFAULT_BUFFER).and_return("lorum ipsum dolor emet") btrans = Thrift::BufferedTransport.new(trans) - btrans.read(6).should == "lorum " - btrans.read(6).should == "ipsum " - btrans.read(6).should == "dolor " - btrans.read(6).should == "emet" + expect(btrans.read(6)).to eq("lorum ") + expect(btrans.read(6)).to eq("ipsum ") + expect(btrans.read(6)).to eq("dolor ") + expect(btrans.read(6)).to eq("emet") end it "should buffer writes and send them on flush" do - trans = mock("Transport") + trans = double("Transport") btrans = Thrift::BufferedTransport.new(trans) btrans.write("one/") btrans.write("two/") btrans.write("three/") - trans.should_receive(:write).with("one/two/three/").ordered - trans.should_receive(:flush).ordered + expect(trans).to receive(:write).with("one/two/three/").ordered + expect(trans).to receive(:flush).ordered btrans.flush end it "should only send buffered data once" do - trans = mock("Transport") + trans = double("Transport") btrans = Thrift::BufferedTransport.new(trans) btrans.write("one/") btrans.write("two/") btrans.write("three/") - trans.should_receive(:write).with("one/two/three/") - trans.stub!(:flush) + expect(trans).to receive(:write).with("one/two/three/") + allow(trans).to receive(:flush) btrans.flush # Nothing to flush with no data btrans.flush end it "should flush on close" do - trans = mock("Transport") - trans.should_receive(:close) + trans = double("Transport") + expect(trans).to receive(:close) btrans = 
Thrift::BufferedTransport.new(trans) - btrans.should_receive(:flush) + expect(btrans).to receive(:flush) btrans.close end it "should not write to socket if there's no data" do - trans = mock("Transport") - trans.should_receive(:flush) + trans = double("Transport") + expect(trans).to receive(:flush) btrans = Thrift::BufferedTransport.new(trans) btrans.flush end @@ -130,80 +144,90 @@ describe 'BaseTransport' do describe Thrift::BufferedTransportFactory do it "should wrap the given transport in a BufferedTransport" do - trans = mock("Transport") - btrans = mock("BufferedTransport") - Thrift::BufferedTransport.should_receive(:new).with(trans).and_return(btrans) - Thrift::BufferedTransportFactory.new.get_transport(trans).should == btrans + trans = double("Transport") + btrans = double("BufferedTransport") + expect(Thrift::BufferedTransport).to receive(:new).with(trans).and_return(btrans) + expect(Thrift::BufferedTransportFactory.new.get_transport(trans)).to eq(btrans) + end + + it "should provide a reasonable to_s" do + expect(Thrift::BufferedTransportFactory.new.to_s).to eq("buffered") end end describe Thrift::FramedTransport do before(:each) do - @trans = mock("Transport") + @trans = double("Transport") + end + + it "should provide a to_s that describes the encapsulation" do + trans = double("Transport") + expect(trans).to receive(:to_s).and_return("mock") + expect(Thrift::FramedTransport.new(trans).to_s).to eq("framed(mock)") end it "should pass through open?/open/close" do ftrans = Thrift::FramedTransport.new(@trans) - @trans.should_receive(:open?).ordered.and_return("+ open?") - @trans.should_receive(:open).ordered.and_return("+ open") - @trans.should_receive(:close).ordered.and_return("+ close") - ftrans.open?.should == "+ open?" 
- ftrans.open.should == "+ open" - ftrans.close.should == "+ close" + expect(@trans).to receive(:open?).ordered.and_return("+ open?") + expect(@trans).to receive(:open).ordered.and_return("+ open") + expect(@trans).to receive(:close).ordered.and_return("+ close") + expect(ftrans.open?).to eq("+ open?") + expect(ftrans.open).to eq("+ open") + expect(ftrans.close).to eq("+ close") end it "should pass through read when read is turned off" do ftrans = Thrift::FramedTransport.new(@trans, false, true) - @trans.should_receive(:read).with(17).ordered.and_return("+ read") - ftrans.read(17).should == "+ read" + expect(@trans).to receive(:read).with(17).ordered.and_return("+ read") + expect(ftrans.read(17)).to eq("+ read") end it "should pass through write/flush when write is turned off" do ftrans = Thrift::FramedTransport.new(@trans, true, false) - @trans.should_receive(:write).with("foo").ordered.and_return("+ write") - @trans.should_receive(:flush).ordered.and_return("+ flush") - ftrans.write("foo").should == "+ write" - ftrans.flush.should == "+ flush" + expect(@trans).to receive(:write).with("foo").ordered.and_return("+ write") + expect(@trans).to receive(:flush).ordered.and_return("+ flush") + expect(ftrans.write("foo")).to eq("+ write") + expect(ftrans.flush).to eq("+ flush") end it "should return a full frame if asked for >= the frame's length" do frame = "this is a frame" - @trans.should_receive(:read_all).with(4).and_return("\000\000\000\017") - @trans.should_receive(:read_all).with(frame.length).and_return(frame) - Thrift::FramedTransport.new(@trans).read(frame.length + 10).should == frame + expect(@trans).to receive(:read_all).with(4).and_return("\000\000\000\017") + expect(@trans).to receive(:read_all).with(frame.length).and_return(frame) + expect(Thrift::FramedTransport.new(@trans).read(frame.length + 10)).to eq(frame) end it "should return slices of the frame when asked for < the frame's length" do frame = "this is a frame" - 
@trans.should_receive(:read_all).with(4).and_return("\000\000\000\017") - @trans.should_receive(:read_all).with(frame.length).and_return(frame) + expect(@trans).to receive(:read_all).with(4).and_return("\000\000\000\017") + expect(@trans).to receive(:read_all).with(frame.length).and_return(frame) ftrans = Thrift::FramedTransport.new(@trans) - ftrans.read(4).should == "this" - ftrans.read(4).should == " is " - ftrans.read(16).should == "a frame" + expect(ftrans.read(4)).to eq("this") + expect(ftrans.read(4)).to eq(" is ") + expect(ftrans.read(16)).to eq("a frame") end it "should return nothing if asked for <= 0" do - Thrift::FramedTransport.new(@trans).read(-2).should == "" + expect(Thrift::FramedTransport.new(@trans).read(-2)).to eq("") end it "should pull a new frame when the first is exhausted" do frame = "this is a frame" frame2 = "yet another frame" - @trans.should_receive(:read_all).with(4).and_return("\000\000\000\017", "\000\000\000\021") - @trans.should_receive(:read_all).with(frame.length).and_return(frame) - @trans.should_receive(:read_all).with(frame2.length).and_return(frame2) + expect(@trans).to receive(:read_all).with(4).and_return("\000\000\000\017", "\000\000\000\021") + expect(@trans).to receive(:read_all).with(frame.length).and_return(frame) + expect(@trans).to receive(:read_all).with(frame2.length).and_return(frame2) ftrans = Thrift::FramedTransport.new(@trans) - ftrans.read(4).should == "this" - ftrans.read(8).should == " is a fr" - ftrans.read(6).should == "ame" - ftrans.read(4).should == "yet " - ftrans.read(16).should == "another frame" + expect(ftrans.read(4)).to eq("this") + expect(ftrans.read(8)).to eq(" is a fr") + expect(ftrans.read(6)).to eq("ame") + expect(ftrans.read(4)).to eq("yet ") + expect(ftrans.read(16)).to eq("another frame") end it "should buffer writes" do ftrans = Thrift::FramedTransport.new(@trans) - @trans.should_not_receive(:write) + expect(@trans).not_to receive(:write) ftrans.write("foo") ftrans.write("bar") 
ftrans.write("this is a frame") @@ -213,15 +237,15 @@ describe 'BaseTransport' do ftrans = Thrift::FramedTransport.new(@trans) ftrans.write("foobar", 3) ftrans.write("barfoo", 1) - @trans.stub!(:flush) - @trans.should_receive(:write).with("\000\000\000\004foob") + allow(@trans).to receive(:flush) + expect(@trans).to receive(:write).with("\000\000\000\004foob") ftrans.flush end it "should flush frames with a 4-byte header" do ftrans = Thrift::FramedTransport.new(@trans) - @trans.should_receive(:write).with("\000\000\000\035one/two/three/this is a frame").ordered - @trans.should_receive(:flush).ordered + expect(@trans).to receive(:write).with("\000\000\000\035one/two/three/this is a frame").ordered + expect(@trans).to receive(:flush).ordered ftrans.write("one/") ftrans.write("two/") ftrans.write("three/") @@ -231,22 +255,26 @@ describe 'BaseTransport' do it "should not flush the same buffered data twice" do ftrans = Thrift::FramedTransport.new(@trans) - @trans.should_receive(:write).with("\000\000\000\007foo/bar") - @trans.stub!(:flush) + expect(@trans).to receive(:write).with("\000\000\000\007foo/bar") + allow(@trans).to receive(:flush) ftrans.write("foo") ftrans.write("/bar") ftrans.flush - @trans.should_receive(:write).with("\000\000\000\000") + expect(@trans).to receive(:write).with("\000\000\000\000") ftrans.flush end end describe Thrift::FramedTransportFactory do it "should wrap the given transport in a FramedTransport" do - trans = mock("Transport") - Thrift::FramedTransport.should_receive(:new).with(trans) + trans = double("Transport") + expect(Thrift::FramedTransport).to receive(:new).with(trans) Thrift::FramedTransportFactory.new.get_transport(trans) end + + it "should provide a reasonable to_s" do + expect(Thrift::FramedTransportFactory.new.to_s).to eq("framed") + end end describe Thrift::MemoryBufferTransport do @@ -254,96 +282,106 @@ describe 'BaseTransport' do @buffer = Thrift::MemoryBufferTransport.new end + it "should provide a reasonable to_s" do + 
expect(@buffer.to_s).to eq("memory") + end + it "should accept a buffer on input and use it directly" do s = "this is a test" @buffer = Thrift::MemoryBufferTransport.new(s) - @buffer.read(4).should == "this" + expect(@buffer.read(4)).to eq("this") s.slice!(-4..-1) - @buffer.read(@buffer.available).should == " is a " + expect(@buffer.read(@buffer.available)).to eq(" is a ") end it "should always remain open" do - @buffer.should be_open + expect(@buffer).to be_open @buffer.close - @buffer.should be_open + expect(@buffer).to be_open end it "should respond to peek and available" do @buffer.write "some data" - @buffer.peek.should be_true - @buffer.available.should == 9 + expect(@buffer.peek).to be_truthy + expect(@buffer.available).to eq(9) @buffer.read(4) - @buffer.peek.should be_true - @buffer.available.should == 5 + expect(@buffer.peek).to be_truthy + expect(@buffer.available).to eq(5) @buffer.read(5) - @buffer.peek.should be_false - @buffer.available.should == 0 + expect(@buffer.peek).to be_falsey + expect(@buffer.available).to eq(0) end it "should be able to reset the buffer" do @buffer.write "test data" @buffer.reset_buffer("foobar") - @buffer.available.should == 6 - @buffer.read(@buffer.available).should == "foobar" + expect(@buffer.available).to eq(6) + expect(@buffer.read(@buffer.available)).to eq("foobar") @buffer.reset_buffer - @buffer.available.should == 0 + expect(@buffer.available).to eq(0) end it "should copy the given string when resetting the buffer" do s = "this is a test" @buffer.reset_buffer(s) - @buffer.available.should == 14 + expect(@buffer.available).to eq(14) @buffer.read(10) - @buffer.available.should == 4 - s.should == "this is a test" + expect(@buffer.available).to eq(4) + expect(s).to eq("this is a test") end it "should return from read what was given in write" do @buffer.write "test data" - @buffer.read(4).should == "test" - @buffer.read(@buffer.available).should == " data" + expect(@buffer.read(4)).to eq("test") + 
expect(@buffer.read(@buffer.available)).to eq(" data") @buffer.write "foo" @buffer.write " bar" - @buffer.read(@buffer.available).should == "foo bar" + expect(@buffer.read(@buffer.available)).to eq("foo bar") end it "should throw an EOFError when there isn't enough data in the buffer" do @buffer.reset_buffer("") - lambda{@buffer.read(1)}.should raise_error(EOFError) + expect{@buffer.read(1)}.to raise_error(EOFError) @buffer.reset_buffer("1234") - lambda{@buffer.read(5)}.should raise_error(EOFError) + expect{@buffer.read(5)}.to raise_error(EOFError) end end describe Thrift::IOStreamTransport do before(:each) do - @input = mock("Input", :closed? => false) - @output = mock("Output", :closed? => false) + @input = double("Input", :closed? => false) + @output = double("Output", :closed? => false) @trans = Thrift::IOStreamTransport.new(@input, @output) end + it "should provide a reasonable to_s" do + expect(@input).to receive(:to_s).and_return("mock_input") + expect(@output).to receive(:to_s).and_return("mock_output") + expect(@trans.to_s).to eq("iostream(input=mock_input,output=mock_output)") + end + it "should be open as long as both input or output are open" do - @trans.should be_open - @input.stub!(:closed?).and_return(true) - @trans.should be_open - @input.stub!(:closed?).and_return(false) - @output.stub!(:closed?).and_return(true) - @trans.should be_open - @input.stub!(:closed?).and_return(true) - @trans.should_not be_open + expect(@trans).to be_open + allow(@input).to receive(:closed?).and_return(true) + expect(@trans).to be_open + allow(@input).to receive(:closed?).and_return(false) + allow(@output).to receive(:closed?).and_return(true) + expect(@trans).to be_open + allow(@input).to receive(:closed?).and_return(true) + expect(@trans).not_to be_open end it "should pass through read/write to input/output" do - @input.should_receive(:read).with(17).and_return("+ read") - @output.should_receive(:write).with("foobar").and_return("+ write") - @trans.read(17).should == 
"+ read" - @trans.write("foobar").should == "+ write" + expect(@input).to receive(:read).with(17).and_return("+ read") + expect(@output).to receive(:write).with("foobar").and_return("+ write") + expect(@trans.read(17)).to eq("+ read") + expect(@trans.write("foobar")).to eq("+ write") end it "should close both input and output when closed" do - @input.should_receive(:close) - @output.should_receive(:close) + expect(@input).to receive(:close) + expect(@output).to receive(:close) @trans.close end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_accelerated_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_accelerated_spec.rb index bac9ea7c7..b2cd04bec 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_accelerated_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_accelerated_spec.rb @@ -33,10 +33,14 @@ if defined? Thrift::BinaryProtocolAccelerated describe Thrift::BinaryProtocolAcceleratedFactory do it "should create a BinaryProtocolAccelerated" do - Thrift::BinaryProtocolAcceleratedFactory.new.get_protocol(mock("MockTransport")).should be_instance_of(Thrift::BinaryProtocolAccelerated) + expect(Thrift::BinaryProtocolAcceleratedFactory.new.get_protocol(double("MockTransport"))).to be_instance_of(Thrift::BinaryProtocolAccelerated) + end + + it "should provide a reasonable to_s" do + expect(Thrift::BinaryProtocolAcceleratedFactory.new.to_s).to eq("binary-accel") end end end else puts "skipping BinaryProtocolAccelerated spec because it is not defined." 
-end \ No newline at end of file +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec.rb index 32772d3fc..065f5ce29 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec.rb @@ -38,29 +38,37 @@ describe 'BinaryProtocol' do it "should read a message header" do @trans.write([protocol_class.const_get(:VERSION_1) | Thrift::MessageTypes::REPLY].pack('N')) @trans.write([42].pack('N')) - @prot.should_receive(:read_string).and_return('testMessage') - @prot.read_message_begin.should == ['testMessage', Thrift::MessageTypes::REPLY, 42] + expect(@prot).to receive(:read_string).and_return('testMessage') + expect(@prot.read_message_begin).to eq(['testMessage', Thrift::MessageTypes::REPLY, 42]) end it "should raise an exception if the message header has the wrong version" do - @prot.should_receive(:read_i32).and_return(-1) - lambda { @prot.read_message_begin }.should raise_error(Thrift::ProtocolException, 'Missing version identifier') do |e| + expect(@prot).to receive(:read_i32).and_return(-1) + expect { @prot.read_message_begin }.to raise_error(Thrift::ProtocolException, 'Missing version identifier') do |e| e.type == Thrift::ProtocolException::BAD_VERSION end end it "should raise an exception if the message header does not exist and strict_read is enabled" do - @prot.should_receive(:read_i32).and_return(42) - @prot.should_receive(:strict_read).and_return(true) - lambda { @prot.read_message_begin }.should raise_error(Thrift::ProtocolException, 'No version identifier, old protocol client?') do |e| + expect(@prot).to receive(:read_i32).and_return(42) + expect(@prot).to receive(:strict_read).and_return(true) + expect { @prot.read_message_begin }.to raise_error(Thrift::ProtocolException, 'No version identifier, old protocol client?') do |e| e.type == 
Thrift::ProtocolException::BAD_VERSION end end + + it "should provide a reasonable to_s" do + expect(@prot.to_s).to eq("binary(memory)") + end end describe Thrift::BinaryProtocolFactory do it "should create a BinaryProtocol" do - Thrift::BinaryProtocolFactory.new.get_protocol(mock("MockTransport")).should be_instance_of(Thrift::BinaryProtocol) + expect(Thrift::BinaryProtocolFactory.new.get_protocol(double("MockTransport"))).to be_instance_of(Thrift::BinaryProtocol) + end + + it "should provide a reasonable to_s" do + expect(Thrift::BinaryProtocolFactory.new.to_s).to eq("binary") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec_shared.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec_shared.rb index 7a9d02872..58d65f040 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec_shared.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/binary_protocol_spec_shared.rb @@ -27,34 +27,34 @@ shared_examples_for 'a binary protocol' do end it "should define the proper VERSION_1, VERSION_MASK AND TYPE_MASK" do - protocol_class.const_get(:VERSION_MASK).should == 0xffff0000 - protocol_class.const_get(:VERSION_1).should == 0x80010000 - protocol_class.const_get(:TYPE_MASK).should == 0x000000ff + expect(protocol_class.const_get(:VERSION_MASK)).to eq(0xffff0000) + expect(protocol_class.const_get(:VERSION_1)).to eq(0x80010000) + expect(protocol_class.const_get(:TYPE_MASK)).to eq(0x000000ff) end it "should make strict_read readable" do - @prot.strict_read.should eql(true) + expect(@prot.strict_read).to eql(true) end it "should make strict_write readable" do - @prot.strict_write.should eql(true) + expect(@prot.strict_write).to eql(true) end it "should write the message header" do @prot.write_message_begin('testMessage', Thrift::MessageTypes::CALL, 17) - @trans.read(@trans.available).should == [protocol_class.const_get(:VERSION_1) | Thrift::MessageTypes::CALL, "testMessage".size, "testMessage", 
17].pack("NNa11N") + expect(@trans.read(@trans.available)).to eq([protocol_class.const_get(:VERSION_1) | Thrift::MessageTypes::CALL, "testMessage".size, "testMessage", 17].pack("NNa11N")) end it "should write the message header without version when writes are not strict" do @prot = protocol_class.new(@trans, true, false) # no strict write @prot.write_message_begin('testMessage', Thrift::MessageTypes::CALL, 17) - @trans.read(@trans.available).should == "\000\000\000\vtestMessage\001\000\000\000\021" + expect(@trans.read(@trans.available)).to eq("\000\000\000\vtestMessage\001\000\000\000\021") end it "should write the message header with a version when writes are strict" do @prot = protocol_class.new(@trans) # strict write @prot.write_message_begin('testMessage', Thrift::MessageTypes::CALL, 17) - @trans.read(@trans.available).should == "\200\001\000\001\000\000\000\vtestMessage\000\000\000\021" + expect(@trans.read(@trans.available)).to eq("\200\001\000\001\000\000\000\vtestMessage\000\000\000\021") end @@ -62,64 +62,67 @@ shared_examples_for 'a binary protocol' do it "should write the field header" do @prot.write_field_begin('foo', Thrift::Types::DOUBLE, 3) - @trans.read(@trans.available).should == [Thrift::Types::DOUBLE, 3].pack("cn") + expect(@trans.read(@trans.available)).to eq([Thrift::Types::DOUBLE, 3].pack("cn")) end # field footer is a noop it "should write the STOP field" do @prot.write_field_stop - @trans.read(1).should == "\000" + expect(@trans.read(1)).to eq("\000") end it "should write the map header" do @prot.write_map_begin(Thrift::Types::STRING, Thrift::Types::LIST, 17) - @trans.read(@trans.available).should == [Thrift::Types::STRING, Thrift::Types::LIST, 17].pack("ccN"); + expect(@trans.read(@trans.available)).to eq([Thrift::Types::STRING, Thrift::Types::LIST, 17].pack("ccN")); end # map footer is a noop it "should write the list header" do @prot.write_list_begin(Thrift::Types::I16, 42) - @trans.read(@trans.available).should == [Thrift::Types::I16, 
42].pack("cN") + expect(@trans.read(@trans.available)).to eq([Thrift::Types::I16, 42].pack("cN")) end # list footer is a noop it "should write the set header" do @prot.write_set_begin(Thrift::Types::I16, 42) - @trans.read(@trans.available).should == [Thrift::Types::I16, 42].pack("cN") + expect(@trans.read(@trans.available)).to eq([Thrift::Types::I16, 42].pack("cN")) end it "should write a bool" do @prot.write_bool(true) @prot.write_bool(false) - @trans.read(@trans.available).should == "\001\000" + expect(@trans.read(@trans.available)).to eq("\001\000") end it "should treat a nil bool as false" do @prot.write_bool(nil) - @trans.read(1).should == "\000" + expect(@trans.read(1)).to eq("\000") end it "should write a byte" do # byte is small enough, let's check -128..127 (-128..127).each do |i| @prot.write_byte(i) - @trans.read(1).should == [i].pack('c') + expect(@trans.read(1)).to eq([i].pack('c')) end - # handing it numbers out of signed range should clip - @trans.rspec_verify + end + + it "should clip numbers out of signed range" do (128..255).each do |i| @prot.write_byte(i) - @trans.read(1).should == [i].pack('c') + expect(@trans.read(1)).to eq([i].pack('c')) end - # and lastly, a Bignum is going to error out - lambda { @prot.write_byte(2**65) }.should raise_error(RangeError) + end + + it "errors out with a Bignum" do + expect { @prot.write_byte(2**65) }.to raise_error(RangeError) end it "should error gracefully when trying to write a nil byte" do - lambda { @prot.write_byte(nil) }.should raise_error + expect { @prot.write_byte(nil) }.to raise_error end it "should write an i16" do @@ -131,14 +134,14 @@ shared_examples_for 'a binary protocol' do # and try something out of signed range, it should clip @prot.write_i16(2**15 + 5) - @trans.read(@trans.available).should == "\200\000\374\000\000\021\000\000\330\360\006\273\177\377\200\005" + expect(@trans.read(@trans.available)).to eq("\200\000\374\000\000\021\000\000\330\360\006\273\177\377\200\005") # a Bignum should 
error # lambda { @prot.write_i16(2**65) }.should raise_error(RangeError) end it "should error gracefully when trying to write a nil i16" do - lambda { @prot.write_i16(nil) }.should raise_error + expect { @prot.write_i16(nil) }.to raise_error end it "should write an i32" do @@ -148,14 +151,14 @@ shared_examples_for 'a binary protocol' do @prot.write_i32(i) end # try something out of signed range, it should clip - @trans.read(@trans.available).should == "\200\000\000\000" + "\377\376\037\r" + "\377\377\366\034" + "\377\377\377\375" + "\000\000\000\000" + "\000#\340\203" + "\000\0000+" + "\177\377\377\377" + expect(@trans.read(@trans.available)).to eq("\200\000\000\000" + "\377\376\037\r" + "\377\377\366\034" + "\377\377\377\375" + "\000\000\000\000" + "\000#\340\203" + "\000\0000+" + "\177\377\377\377") [2 ** 31 + 5, 2 ** 65 + 5].each do |i| - lambda { @prot.write_i32(i) }.should raise_error(RangeError) + expect { @prot.write_i32(i) }.to raise_error(RangeError) end end it "should error gracefully when trying to write a nil i32" do - lambda { @prot.write_i32(nil) }.should raise_error + expect { @prot.write_i32(nil) }.to raise_error end it "should write an i64" do @@ -165,7 +168,7 @@ shared_examples_for 'a binary protocol' do @prot.write_i64(i) end # try something out of signed range, it should clip - @trans.read(@trans.available).should == ["\200\000\000\000\000\000\000\000", + expect(@trans.read(@trans.available)).to eq(["\200\000\000\000\000\000\000\000", "\377\377\364\303\035\244+]", "\377\377\377\377\376\231:\341", "\377\377\377\377\377\377\377\026", @@ -173,12 +176,12 @@ shared_examples_for 'a binary protocol' do "\000\000\000\000\000\000\004\317", "\000\000\000\000\000#\340\204", "\000\000\000\002\340\311~\365", - "\177\377\377\377\377\377\377\377"].join("") - lambda { @prot.write_i64(2 ** 65 + 5) }.should raise_error(RangeError) + "\177\377\377\377\377\377\377\377"].join("")) + expect { @prot.write_i64(2 ** 65 + 5) }.to raise_error(RangeError) end it "should 
error gracefully when trying to write a nil i64" do - lambda { @prot.write_i64(nil) }.should raise_error + expect { @prot.write_i64(nil) }.to raise_error end it "should write a double" do @@ -186,12 +189,12 @@ shared_examples_for 'a binary protocol' do values = [Float::MIN,-1231.15325, -123123.23, -23.23515123, 0, 12351.1325, 523.23, Float::MAX] values.each do |f| @prot.write_double(f) - @trans.read(@trans.available).should == [f].pack("G") + expect(@trans.read(@trans.available)).to eq([f].pack("G")) end end it "should error gracefully when trying to write a nil double" do - lambda { @prot.write_double(nil) }.should raise_error + expect { @prot.write_double(nil) }.to raise_error end if RUBY_VERSION >= '1.9' @@ -199,111 +202,111 @@ shared_examples_for 'a binary protocol' do str = 'abc' @prot.write_string(str) a = @trans.read(@trans.available) - a.encoding.should == Encoding::BINARY - a.unpack('C*').should == [0x00, 0x00, 0x00, 0x03, 0x61, 0x62, 0x63] + expect(a.encoding).to eq(Encoding::BINARY) + expect(a.unpack('C*')).to eq([0x00, 0x00, 0x00, 0x03, 0x61, 0x62, 0x63]) end it 'should write a string with unicode characters' do str = "abc \u20AC \u20AD".encode('UTF-8') @prot.write_string(str) a = @trans.read(@trans.available) - a.encoding.should == Encoding::BINARY - a.unpack('C*').should == [0x00, 0x00, 0x00, 0x0B, 0x61, 0x62, 0x63, 0x20, - 0xE2, 0x82, 0xAC, 0x20, 0xE2, 0x82, 0xAD] + expect(a.encoding).to eq(Encoding::BINARY) + expect(a.unpack('C*')).to eq([0x00, 0x00, 0x00, 0x0B, 0x61, 0x62, 0x63, 0x20, + 0xE2, 0x82, 0xAC, 0x20, 0xE2, 0x82, 0xAD]) end it 'should write should write a string with unicode characters and transcoding' do str = "abc \u20AC".encode('ISO-8859-15') @prot.write_string(str) a = @trans.read(@trans.available) - a.encoding.should == Encoding::BINARY - a.unpack('C*').should == [0x00, 0x00, 0x00, 0x07, 0x61, 0x62, 0x63, 0x20, 0xE2, 0x82, 0xAC] + expect(a.encoding).to eq(Encoding::BINARY) + expect(a.unpack('C*')).to eq([0x00, 0x00, 0x00, 0x07, 0x61, 
0x62, 0x63, 0x20, 0xE2, 0x82, 0xAC]) end it 'should write a binary string' do buffer = [0, 1, 2, 3].pack('C*') @prot.write_binary(buffer) a = @trans.read(@trans.available) - a.encoding.should == Encoding::BINARY - a.unpack('C*').should == [0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x02, 0x03] + expect(a.encoding).to eq(Encoding::BINARY) + expect(a.unpack('C*')).to eq([0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x02, 0x03]) end else it 'should write a string' do str = 'abc' @prot.write_string(str) a = @trans.read(@trans.available) - a.unpack('C*').should == [0x00, 0x00, 0x00, 0x03, 0x61, 0x62, 0x63] + expect(a.unpack('C*')).to eq([0x00, 0x00, 0x00, 0x03, 0x61, 0x62, 0x63]) end it 'should write a binary string' do buffer = [0, 1, 2, 3].pack('C*') @prot.write_binary(buffer) a = @trans.read(@trans.available) - a.unpack('C*').should == [0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x02, 0x03] + expect(a.unpack('C*')).to eq([0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x02, 0x03]) end end it "should error gracefully when trying to write a nil string" do - lambda { @prot.write_string(nil) }.should raise_error + expect { @prot.write_string(nil) }.to raise_error end it "should write the message header without version when writes are not strict" do @prot = protocol_class.new(@trans, true, false) # no strict write @prot.write_message_begin('testMessage', Thrift::MessageTypes::CALL, 17) - @trans.read(@trans.available).should == "\000\000\000\vtestMessage\001\000\000\000\021" + expect(@trans.read(@trans.available)).to eq("\000\000\000\vtestMessage\001\000\000\000\021") end it "should write the message header with a version when writes are strict" do @prot = protocol_class.new(@trans) # strict write @prot.write_message_begin('testMessage', Thrift::MessageTypes::CALL, 17) - @trans.read(@trans.available).should == "\200\001\000\001\000\000\000\vtestMessage\000\000\000\021" + expect(@trans.read(@trans.available)).to eq("\200\001\000\001\000\000\000\vtestMessage\000\000\000\021") end # message footer is a noop 
it "should read a field header" do @trans.write([Thrift::Types::STRING, 3].pack("cn")) - @prot.read_field_begin.should == [nil, Thrift::Types::STRING, 3] + expect(@prot.read_field_begin).to eq([nil, Thrift::Types::STRING, 3]) end # field footer is a noop it "should read a stop field" do @trans.write([Thrift::Types::STOP].pack("c")); - @prot.read_field_begin.should == [nil, Thrift::Types::STOP, 0] + expect(@prot.read_field_begin).to eq([nil, Thrift::Types::STOP, 0]) end it "should read a map header" do @trans.write([Thrift::Types::DOUBLE, Thrift::Types::I64, 42].pack("ccN")) - @prot.read_map_begin.should == [Thrift::Types::DOUBLE, Thrift::Types::I64, 42] + expect(@prot.read_map_begin).to eq([Thrift::Types::DOUBLE, Thrift::Types::I64, 42]) end # map footer is a noop it "should read a list header" do @trans.write([Thrift::Types::STRING, 17].pack("cN")) - @prot.read_list_begin.should == [Thrift::Types::STRING, 17] + expect(@prot.read_list_begin).to eq([Thrift::Types::STRING, 17]) end # list footer is a noop it "should read a set header" do @trans.write([Thrift::Types::STRING, 17].pack("cN")) - @prot.read_set_begin.should == [Thrift::Types::STRING, 17] + expect(@prot.read_set_begin).to eq([Thrift::Types::STRING, 17]) end # set footer is a noop it "should read a bool" do @trans.write("\001\000"); - @prot.read_bool.should == true - @prot.read_bool.should == false + expect(@prot.read_bool).to eq(true) + expect(@prot.read_bool).to eq(false) end it "should read a byte" do [-128, -57, -3, 0, 17, 24, 127].each do |i| @trans.write([i].pack("c")) - @prot.read_byte.should == i + expect(@prot.read_byte).to eq(i) end end @@ -311,7 +314,7 @@ shared_examples_for 'a binary protocol' do # try a scattering of values, including min/max [-2**15, -5237, -353, 0, 1527, 2234, 2**15-1].each do |i| @trans.write([i].pack("n")); - @prot.read_i16.should == i + expect(@prot.read_i16).to eq(i) end end @@ -319,7 +322,7 @@ shared_examples_for 'a binary protocol' do # try a scattering of values, 
including min/max [-2**31, -235125, -6236, 0, 2351, 123123, 2**31-1].each do |i| @trans.write([i].pack("N")) - @prot.read_i32.should == i + expect(@prot.read_i32).to eq(i) end end @@ -327,7 +330,7 @@ shared_examples_for 'a binary protocol' do # try a scattering of values, including min/max [-2**63, -123512312, -6346, 0, 32, 2346322323, 2**63-1].each do |i| @trans.write([i >> 32, i & 0xFFFFFFFF].pack("NN")) - @prot.read_i64.should == i + expect(@prot.read_i64).to eq(i) end end @@ -335,7 +338,7 @@ shared_examples_for 'a binary protocol' do # try a random scattering of values, including min/max [Float::MIN, -231231.12351, -323.233513, 0, 123.2351235, 2351235.12351235, Float::MAX].each do |f| @trans.write([f].pack("G")); - @prot.read_double.should == f + expect(@prot.read_double).to eq(f) end end @@ -345,8 +348,8 @@ shared_examples_for 'a binary protocol' do buffer = [0x00, 0x00, 0x00, 0x03, 0x61, 0x62, 0x63].pack('C*') @trans.write(buffer) a = @prot.read_string - a.should == 'abc'.encode('UTF-8') - a.encoding.should == Encoding::UTF_8 + expect(a).to eq('abc'.encode('UTF-8')) + expect(a.encoding).to eq(Encoding::UTF_8) end it 'should read a string containing unicode characters from UTF-8 encoded buffer' do @@ -354,44 +357,44 @@ shared_examples_for 'a binary protocol' do buffer = [0x00, 0x00, 0x00, 0x03, 0xE2, 0x82, 0xAC].pack('C*') @trans.write(buffer) a = @prot.read_string - a.should == "\u20AC".encode('UTF-8') - a.encoding.should == Encoding::UTF_8 + expect(a).to eq("\u20AC".encode('UTF-8')) + expect(a.encoding).to eq(Encoding::UTF_8) end it 'should read a binary string' do buffer = [0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x02, 0x03].pack('C*') @trans.write(buffer) a = @prot.read_binary - a.should == [0x00, 0x01, 0x02, 0x03].pack('C*') - a.encoding.should == Encoding::BINARY + expect(a).to eq([0x00, 0x01, 0x02, 0x03].pack('C*')) + expect(a.encoding).to eq(Encoding::BINARY) end else it 'should read a string' do # i32 of value 3, followed by three characters/UTF-8 bytes 
'a', 'b', 'c' buffer = [0x00, 0x00, 0x00, 0x03, 0x61, 0x62, 0x63].pack('C*') @trans.write(buffer) - @prot.read_string.should == 'abc' + expect(@prot.read_string).to eq('abc') end it 'should read a binary string' do buffer = [0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x02, 0x03].pack('C*') @trans.write(buffer) a = @prot.read_binary - a.should == [0x00, 0x01, 0x02, 0x03].pack('C*') + expect(a).to eq([0x00, 0x01, 0x02, 0x03].pack('C*')) end end it "should perform a complete rpc with no args or return" do srv_test( proc {|client| client.send_voidMethod()}, - proc {|client| client.recv_voidMethod.should == nil} + proc {|client| expect(client.recv_voidMethod).to eq(nil)} ) end it "should perform a complete rpc with a primitive return type" do srv_test( proc {|client| client.send_primitiveMethod()}, - proc {|client| client.recv_primitiveMethod.should == 1} + proc {|client| expect(client.recv_primitiveMethod).to eq(1)} ) end @@ -402,7 +405,7 @@ shared_examples_for 'a binary protocol' do result = client.recv_structMethod result.set_byte_map = nil result.map_byte_map = nil - result.should == Fixtures::COMPACT_PROTOCOL_TEST_STRUCT + expect(result).to eq(Fixtures::COMPACT_PROTOCOL_TEST_STRUCT) } ) end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/bytes_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/bytes_spec.rb index b82e304b7..2e8653cfc 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/bytes_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/bytes_spec.rb @@ -25,33 +25,33 @@ describe Thrift::Bytes do describe '.empty_byte_buffer' do it 'should create an empty buffer' do b = Thrift::Bytes.empty_byte_buffer - b.length.should == 0 - b.encoding.should == Encoding::BINARY + expect(b.length).to eq(0) + expect(b.encoding).to eq(Encoding::BINARY) end it 'should create an empty buffer of given size' do b = Thrift::Bytes.empty_byte_buffer 2 - b.length.should == 2 - b.getbyte(0).should == 0 - b.getbyte(1).should == 0 - b.encoding.should == Encoding::BINARY 
+ expect(b.length).to eq(2) + expect(b.getbyte(0)).to eq(0) + expect(b.getbyte(1)).to eq(0) + expect(b.encoding).to eq(Encoding::BINARY) end end describe '.force_binary_encoding' do it 'should change encoding' do e = 'STRING'.encode('UTF-8') - e.encoding.should_not == Encoding::BINARY + expect(e.encoding).not_to eq(Encoding::BINARY) a = Thrift::Bytes.force_binary_encoding e - a.encoding.should == Encoding::BINARY + expect(a.encoding).to eq(Encoding::BINARY) end end describe '.get_string_byte' do it 'should get the byte at index' do s = "\x41\x42" - Thrift::Bytes.get_string_byte(s, 0).should == 0x41 - Thrift::Bytes.get_string_byte(s, 1).should == 0x42 + expect(Thrift::Bytes.get_string_byte(s, 0)).to eq(0x41) + expect(Thrift::Bytes.get_string_byte(s, 1)).to eq(0x42) end end @@ -59,42 +59,42 @@ describe Thrift::Bytes do it 'should set byte value at index' do s = "\x41\x42" Thrift::Bytes.set_string_byte(s, 0, 0x43) - s.getbyte(0).should == 0x43 - s.should == 'CB' + expect(s.getbyte(0)).to eq(0x43) + expect(s).to eq('CB') end end describe '.convert_to_utf8_byte_buffer' do it 'should convert UTF-8 String to byte buffer' do e = "\u20AC".encode('UTF-8') # a string with euro sign character U+20AC - e.length.should == 1 + expect(e.length).to eq(1) a = Thrift::Bytes.convert_to_utf8_byte_buffer e - a.encoding.should == Encoding::BINARY - a.length.should == 3 - a.unpack('C*').should == [0xE2, 0x82, 0xAC] + expect(a.encoding).to eq(Encoding::BINARY) + expect(a.length).to eq(3) + expect(a.unpack('C*')).to eq([0xE2, 0x82, 0xAC]) end it 'should convert ISO-8859-15 String to UTF-8 byte buffer' do # Assumptions e = "\u20AC".encode('ISO-8859-15') # a string with euro sign character U+20AC, then converted to ISO-8859-15 - e.length.should == 1 - e.unpack('C*').should == [0xA4] # euro sign is a different code point in ISO-8859-15 + expect(e.length).to eq(1) + expect(e.unpack('C*')).to eq([0xA4]) # euro sign is a different code point in ISO-8859-15 a = 
Thrift::Bytes.convert_to_utf8_byte_buffer e - a.encoding.should == Encoding::BINARY - a.length.should == 3 - a.unpack('C*').should == [0xE2, 0x82, 0xAC] + expect(a.encoding).to eq(Encoding::BINARY) + expect(a.length).to eq(3) + expect(a.unpack('C*')).to eq([0xE2, 0x82, 0xAC]) end end describe '.convert_to_string' do it 'should convert UTF-8 byte buffer to a UTF-8 String' do e = [0xE2, 0x82, 0xAC].pack("C*") - e.encoding.should == Encoding::BINARY + expect(e.encoding).to eq(Encoding::BINARY) a = Thrift::Bytes.convert_to_string e - a.encoding.should == Encoding::UTF_8 - a.should == "\u20AC" + expect(a.encoding).to eq(Encoding::UTF_8) + expect(a).to eq("\u20AC") end end @@ -102,14 +102,14 @@ describe Thrift::Bytes do describe '.empty_byte_buffer' do it 'should create an empty buffer' do b = Thrift::Bytes.empty_byte_buffer - b.length.should == 0 + expect(b.length).to eq(0) end it 'should create an empty buffer of given size' do b = Thrift::Bytes.empty_byte_buffer 2 - b.length.should == 2 - b[0].should == 0 - b[1].should == 0 + expect(b.length).to eq(2) + expect(b[0]).to eq(0) + expect(b[1]).to eq(0) end end @@ -117,16 +117,16 @@ describe Thrift::Bytes do it 'should be a no-op' do e = 'STRING' a = Thrift::Bytes.force_binary_encoding e - a.should == e - a.should be(e) + expect(a).to eq(e) + expect(a).to be(e) end end describe '.get_string_byte' do it 'should get the byte at index' do s = "\x41\x42" - Thrift::Bytes.get_string_byte(s, 0).should == 0x41 - Thrift::Bytes.get_string_byte(s, 1).should == 0x42 + expect(Thrift::Bytes.get_string_byte(s, 0)).to eq(0x41) + expect(Thrift::Bytes.get_string_byte(s, 1)).to eq(0x42) end end @@ -134,8 +134,8 @@ describe Thrift::Bytes do it 'should set byte value at index' do s = "\x41\x42" Thrift::Bytes.set_string_byte(s, 0, 0x43) - s[0].should == 0x43 - s.should == 'CB' + expect(s[0]).to eq(0x43) + expect(s).to eq('CB') end end @@ -143,8 +143,8 @@ describe Thrift::Bytes do it 'should be a no-op' do e = 'STRING' a = 
Thrift::Bytes.convert_to_utf8_byte_buffer e - a.should == e - a.should be(e) + expect(a).to eq(e) + expect(a).to be(e) end end @@ -152,8 +152,8 @@ describe Thrift::Bytes do it 'should be a no-op' do e = 'STRING' a = Thrift::Bytes.convert_to_string e - a.should == e - a.should be(e) + expect(a).to eq(e) + expect(a).to be(e) end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/client_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/client_spec.rb index f8ffe8a8d..d5d4ceedb 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/client_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/client_spec.rb @@ -26,74 +26,73 @@ describe 'Client' do end before(:each) do - @prot = mock("MockProtocol") + @prot = double("MockProtocol") @client = ClientSpec.new(@prot) end describe Thrift::Client do it "should re-use iprot for oprot if not otherwise specified" do - @client.instance_variable_get(:'@iprot').should eql(@prot) - @client.instance_variable_get(:'@oprot').should eql(@prot) + expect(@client.instance_variable_get(:'@iprot')).to eql(@prot) + expect(@client.instance_variable_get(:'@oprot')).to eql(@prot) end it "should send a test message" do - @prot.should_receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::CALL, 0) - mock_args = mock('#') - mock_args.should_receive(:foo=).with('foo') - mock_args.should_receive(:bar=).with(42) - mock_args.should_receive(:write).with(@prot) - @prot.should_receive(:write_message_end) - @prot.should_receive(:trans) do - mock('trans').tap do |trans| - trans.should_receive(:flush) + expect(@prot).to receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::CALL, 0) + mock_args = double('#') + expect(mock_args).to receive(:foo=).with('foo') + expect(mock_args).to receive(:bar=).with(42) + expect(mock_args).to receive(:write).with(@prot) + expect(@prot).to receive(:write_message_end) + expect(@prot).to receive(:trans) do + double('trans').tap do |trans| + expect(trans).to 
receive(:flush) end end - klass = stub("TestMessage_args", :new => mock_args) + klass = double("TestMessage_args", :new => mock_args) @client.send_message('testMessage', klass, :foo => 'foo', :bar => 42) end it "should increment the sequence id when sending messages" do - pending "it seems sequence ids are completely ignored right now" do - @prot.should_receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::CALL, 0).ordered - @prot.should_receive(:write_message_begin).with('testMessage2', Thrift::MessageTypes::CALL, 1).ordered - @prot.should_receive(:write_message_begin).with('testMessage3', Thrift::MessageTypes::CALL, 2).ordered - @prot.stub!(:write_message_end) - @prot.stub!(:trans).and_return mock("trans").as_null_object - @client.send_message('testMessage', mock("args class").as_null_object) - @client.send_message('testMessage2', mock("args class").as_null_object) - @client.send_message('testMessage3', mock("args class").as_null_object) - end + pending "it seems sequence ids are completely ignored right now" + @prot.expect(:write_message_begin).with('testMessage', Thrift::MessageTypes::CALL, 0).ordered + @prot.expect(:write_message_begin).with('testMessage2', Thrift::MessageTypes::CALL, 1).ordered + @prot.expect(:write_message_begin).with('testMessage3', Thrift::MessageTypes::CALL, 2).ordered + @prot.stub!(:write_message_end) + @prot.stub!(:trans).and_return double("trans").as_null_object + @client.send_message('testMessage', double("args class").as_null_object) + @client.send_message('testMessage2', double("args class").as_null_object) + @client.send_message('testMessage3', double("args class").as_null_object) end it "should receive a test message" do - @prot.should_receive(:read_message_begin).and_return [nil, Thrift::MessageTypes::CALL, 0] - @prot.should_receive(:read_message_end) - mock_klass = mock("#") - mock_klass.should_receive(:read).with(@prot) - @client.receive_message(stub("MockClass", :new => mock_klass)) + expect(@prot).to 
receive(:read_message_begin).and_return [nil, Thrift::MessageTypes::CALL, 0] + expect(@prot).to receive(:read_message_end) + mock_klass = double("#") + expect(mock_klass).to receive(:read).with(@prot) + @client.receive_message(double("MockClass", :new => mock_klass)) end it "should handle received exceptions" do - @prot.should_receive(:read_message_begin).and_return [nil, Thrift::MessageTypes::EXCEPTION, 0] - @prot.should_receive(:read_message_end) - Thrift::ApplicationException.should_receive(:new).and_return do + expect(@prot).to receive(:read_message_begin).and_return [nil, Thrift::MessageTypes::EXCEPTION, 0] + expect(@prot).to receive(:read_message_end) + expect(Thrift::ApplicationException).to receive(:new) do StandardError.new.tap do |mock_exc| - mock_exc.should_receive(:read).with(@prot) + expect(mock_exc).to receive(:read).with(@prot) end end - lambda { @client.receive_message(nil) }.should raise_error(StandardError) + expect { @client.receive_message(nil) }.to raise_error(StandardError) end it "should close the transport if an error occurs while sending a message" do - @prot.stub!(:write_message_begin) - @prot.should_not_receive(:write_message_end) - mock_args = mock("#") - mock_args.should_receive(:write).with(@prot).and_raise(StandardError) - trans = mock("MockTransport") - @prot.stub!(:trans).and_return(trans) - trans.should_receive(:close) - klass = mock("TestMessage_args", :new => mock_args) - lambda { @client.send_message("testMessage", klass) }.should raise_error(StandardError) + allow(@prot).to receive(:write_message_begin) + expect(@prot).not_to receive(:write_message_end) + mock_args = double("#") + expect(mock_args).to receive(:write).with(@prot).and_raise(StandardError) + trans = double("MockTransport") + allow(@prot).to receive(:trans).and_return(trans) + expect(trans).to receive(:close) + klass = double("TestMessage_args", :new => mock_args) + expect { @client.send_message("testMessage", klass) }.to raise_error(StandardError) end end end diff 
--git a/vendor/git.apache.org/thrift.git/lib/rb/spec/compact_protocol_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/compact_protocol_spec.rb index 8a1a228d6..513dd69cf 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/compact_protocol_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/compact_protocol_spec.rb @@ -42,7 +42,7 @@ describe Thrift::CompactProtocol do proto.send(writer(primitive_type), value) # puts "buf: #{trans.inspect_buffer}" if primitive_type == :i64 read_back = proto.send(reader(primitive_type)) - read_back.should == value + expect(read_back).to eq(value) end end end @@ -62,10 +62,10 @@ describe Thrift::CompactProtocol do proto = Thrift::CompactProtocol.new(trans) name, type, id = proto.read_field_begin - type.should == thrift_type - id.should == 15 + expect(type).to eq(thrift_type) + expect(id).to eq(15) read_back = proto.send(reader(primitive_type)) - read_back.should == value + expect(read_back).to eq(value) proto.read_field_end end end @@ -81,7 +81,7 @@ describe Thrift::CompactProtocol do struct2 = Thrift::Test::CompactProtoTestStruct.new struct2.read(proto) - struct2.should == struct + expect(struct2).to eq(struct) end it "should make method calls correctly" do @@ -97,7 +97,7 @@ describe Thrift::CompactProtocol do client.send_Janky(1) # puts client_out_trans.inspect_buffer processor.process(client_out_proto, client_in_proto) - client.recv_Janky.should == 2 + expect(client.recv_Janky).to eq(2) end it "should deal with fields following fields that have non-delta ids" do @@ -113,7 +113,7 @@ describe Thrift::CompactProtocol do deser = Thrift::Deserializer.new(Thrift::CompactProtocolFactory.new) brcp2 = Thrift::Test::BreaksRubyCompactProtocol.new deser.deserialize(brcp2, bytes) - brcp2.should == brcp + expect(brcp2).to eq(brcp) end it "should deserialize an empty map to an empty hash" do @@ -124,7 +124,12 @@ describe Thrift::CompactProtocol do deser = Thrift::Deserializer.new(Thrift::CompactProtocolFactory.new) struct2 = 
Thrift::Test::SingleMapTestStruct.new deser.deserialize(struct2, bytes) - struct.should == struct2 + expect(struct).to eq(struct2) + end + + it "should provide a reasonable to_s" do + trans = Thrift::MemoryBufferTransport.new + expect(Thrift::CompactProtocol.new(trans).to_s).to eq("compact(memory)") end class JankyHandler @@ -141,3 +146,13 @@ describe Thrift::CompactProtocol do "read_#{sym.to_s}" end end + +describe Thrift::CompactProtocolFactory do + it "should create a CompactProtocol" do + expect(Thrift::CompactProtocolFactory.new.get_protocol(double("MockTransport"))).to be_instance_of(Thrift::CompactProtocol) + end + + it "should provide a reasonable to_s" do + expect(Thrift::CompactProtocolFactory.new.to_s).to eq("compact") + end +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/exception_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/exception_spec.rb index d1da6217e..379ae6980 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/exception_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/exception_spec.rb @@ -24,107 +24,107 @@ describe 'Exception' do describe Thrift::Exception do it "should have an accessible message" do e = Thrift::Exception.new("test message") - e.message.should == "test message" + expect(e.message).to eq("test message") end end describe Thrift::ApplicationException do it "should inherit from Thrift::Exception" do - Thrift::ApplicationException.superclass.should == Thrift::Exception + expect(Thrift::ApplicationException.superclass).to eq(Thrift::Exception) end it "should have an accessible type and message" do e = Thrift::ApplicationException.new - e.type.should == Thrift::ApplicationException::UNKNOWN - e.message.should be_nil + expect(e.type).to eq(Thrift::ApplicationException::UNKNOWN) + expect(e.message).to be_nil e = Thrift::ApplicationException.new(Thrift::ApplicationException::UNKNOWN_METHOD, "test message") - e.type.should == Thrift::ApplicationException::UNKNOWN_METHOD - e.message.should == 
"test message" + expect(e.type).to eq(Thrift::ApplicationException::UNKNOWN_METHOD) + expect(e.message).to eq("test message") end it "should read a struct off of a protocol" do - prot = mock("MockProtocol") - prot.should_receive(:read_struct_begin).ordered - prot.should_receive(:read_field_begin).exactly(3).times.and_return( + prot = double("MockProtocol") + expect(prot).to receive(:read_struct_begin).ordered + expect(prot).to receive(:read_field_begin).exactly(3).times.and_return( ["message", Thrift::Types::STRING, 1], ["type", Thrift::Types::I32, 2], [nil, Thrift::Types::STOP, 0] ) - prot.should_receive(:read_string).ordered.and_return "test message" - prot.should_receive(:read_i32).ordered.and_return Thrift::ApplicationException::BAD_SEQUENCE_ID - prot.should_receive(:read_field_end).exactly(2).times - prot.should_receive(:read_struct_end).ordered + expect(prot).to receive(:read_string).ordered.and_return "test message" + expect(prot).to receive(:read_i32).ordered.and_return Thrift::ApplicationException::BAD_SEQUENCE_ID + expect(prot).to receive(:read_field_end).exactly(2).times + expect(prot).to receive(:read_struct_end).ordered e = Thrift::ApplicationException.new e.read(prot) - e.message.should == "test message" - e.type.should == Thrift::ApplicationException::BAD_SEQUENCE_ID + expect(e.message).to eq("test message") + expect(e.type).to eq(Thrift::ApplicationException::BAD_SEQUENCE_ID) end it "should skip bad fields when reading a struct" do - prot = mock("MockProtocol") - prot.should_receive(:read_struct_begin).ordered - prot.should_receive(:read_field_begin).exactly(5).times.and_return( + prot = double("MockProtocol") + expect(prot).to receive(:read_struct_begin).ordered + expect(prot).to receive(:read_field_begin).exactly(5).times.and_return( ["type", Thrift::Types::I32, 2], ["type", Thrift::Types::STRING, 2], ["message", Thrift::Types::MAP, 1], ["message", Thrift::Types::STRING, 3], [nil, Thrift::Types::STOP, 0] ) - 
prot.should_receive(:read_i32).and_return Thrift::ApplicationException::INVALID_MESSAGE_TYPE - prot.should_receive(:skip).with(Thrift::Types::STRING).twice - prot.should_receive(:skip).with(Thrift::Types::MAP) - prot.should_receive(:read_field_end).exactly(4).times - prot.should_receive(:read_struct_end).ordered + expect(prot).to receive(:read_i32).and_return Thrift::ApplicationException::INVALID_MESSAGE_TYPE + expect(prot).to receive(:skip).with(Thrift::Types::STRING).twice + expect(prot).to receive(:skip).with(Thrift::Types::MAP) + expect(prot).to receive(:read_field_end).exactly(4).times + expect(prot).to receive(:read_struct_end).ordered e = Thrift::ApplicationException.new e.read(prot) - e.message.should be_nil - e.type.should == Thrift::ApplicationException::INVALID_MESSAGE_TYPE + expect(e.message).to be_nil + expect(e.type).to eq(Thrift::ApplicationException::INVALID_MESSAGE_TYPE) end it "should write a Thrift::ApplicationException struct to the oprot" do - prot = mock("MockProtocol") - prot.should_receive(:write_struct_begin).with("Thrift::ApplicationException").ordered - prot.should_receive(:write_field_begin).with("message", Thrift::Types::STRING, 1).ordered - prot.should_receive(:write_string).with("test message").ordered - prot.should_receive(:write_field_begin).with("type", Thrift::Types::I32, 2).ordered - prot.should_receive(:write_i32).with(Thrift::ApplicationException::UNKNOWN_METHOD).ordered - prot.should_receive(:write_field_end).twice - prot.should_receive(:write_field_stop).ordered - prot.should_receive(:write_struct_end).ordered + prot = double("MockProtocol") + expect(prot).to receive(:write_struct_begin).with("Thrift::ApplicationException").ordered + expect(prot).to receive(:write_field_begin).with("message", Thrift::Types::STRING, 1).ordered + expect(prot).to receive(:write_string).with("test message").ordered + expect(prot).to receive(:write_field_begin).with("type", Thrift::Types::I32, 2).ordered + expect(prot).to 
receive(:write_i32).with(Thrift::ApplicationException::UNKNOWN_METHOD).ordered + expect(prot).to receive(:write_field_end).twice + expect(prot).to receive(:write_field_stop).ordered + expect(prot).to receive(:write_struct_end).ordered e = Thrift::ApplicationException.new(Thrift::ApplicationException::UNKNOWN_METHOD, "test message") e.write(prot) end it "should skip nil fields when writing to the oprot" do - prot = mock("MockProtocol") - prot.should_receive(:write_struct_begin).with("Thrift::ApplicationException").ordered - prot.should_receive(:write_field_begin).with("message", Thrift::Types::STRING, 1).ordered - prot.should_receive(:write_string).with("test message").ordered - prot.should_receive(:write_field_end).ordered - prot.should_receive(:write_field_stop).ordered - prot.should_receive(:write_struct_end).ordered + prot = double("MockProtocol") + expect(prot).to receive(:write_struct_begin).with("Thrift::ApplicationException").ordered + expect(prot).to receive(:write_field_begin).with("message", Thrift::Types::STRING, 1).ordered + expect(prot).to receive(:write_string).with("test message").ordered + expect(prot).to receive(:write_field_end).ordered + expect(prot).to receive(:write_field_stop).ordered + expect(prot).to receive(:write_struct_end).ordered e = Thrift::ApplicationException.new(nil, "test message") e.write(prot) - prot = mock("MockProtocol") - prot.should_receive(:write_struct_begin).with("Thrift::ApplicationException").ordered - prot.should_receive(:write_field_begin).with("type", Thrift::Types::I32, 2).ordered - prot.should_receive(:write_i32).with(Thrift::ApplicationException::BAD_SEQUENCE_ID).ordered - prot.should_receive(:write_field_end).ordered - prot.should_receive(:write_field_stop).ordered - prot.should_receive(:write_struct_end).ordered + prot = double("MockProtocol") + expect(prot).to receive(:write_struct_begin).with("Thrift::ApplicationException").ordered + expect(prot).to receive(:write_field_begin).with("type", Thrift::Types::I32, 
2).ordered + expect(prot).to receive(:write_i32).with(Thrift::ApplicationException::BAD_SEQUENCE_ID).ordered + expect(prot).to receive(:write_field_end).ordered + expect(prot).to receive(:write_field_stop).ordered + expect(prot).to receive(:write_struct_end).ordered e = Thrift::ApplicationException.new(Thrift::ApplicationException::BAD_SEQUENCE_ID) e.write(prot) - prot = mock("MockProtocol") - prot.should_receive(:write_struct_begin).with("Thrift::ApplicationException").ordered - prot.should_receive(:write_field_stop).ordered - prot.should_receive(:write_struct_end).ordered + prot = double("MockProtocol") + expect(prot).to receive(:write_struct_begin).with("Thrift::ApplicationException").ordered + expect(prot).to receive(:write_field_stop).ordered + expect(prot).to receive(:write_struct_end).ordered e = Thrift::ApplicationException.new(nil) e.write(prot) @@ -134,8 +134,8 @@ describe 'Exception' do describe Thrift::ProtocolException do it "should have an accessible type" do prot = Thrift::ProtocolException.new(Thrift::ProtocolException::SIZE_LIMIT, "message") - prot.type.should == Thrift::ProtocolException::SIZE_LIMIT - prot.message.should == "message" + expect(prot.type).to eq(Thrift::ProtocolException::SIZE_LIMIT) + expect(prot.message).to eq("message") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/flat_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/flat_spec.rb index f37878231..893056c10 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/flat_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/flat_spec.rb @@ -32,7 +32,7 @@ describe 'generation' do "other_namespace/referenced_constants.rb", "other_namespace/referenced_types.rb" ].each do |name| - File.exist?(File.join(prefix, name)).should_not be_true + expect(File.exist?(File.join(prefix, name))).not_to be_truthy end end @@ -44,19 +44,19 @@ describe 'generation' do "referenced_constants.rb", "referenced_types.rb" ].each do |name| - File.exist?(File.join(prefix, 
name)).should be_true + expect(File.exist?(File.join(prefix, name))).to be_truthy end end it "has a service class in the right place" do - defined?(NamespacedSpecNamespace::NamespacedNonblockingService).should be_true + expect(defined?(NamespacedSpecNamespace::NamespacedNonblockingService)).to be_truthy end it "has a struct in the right place" do - defined?(NamespacedSpecNamespace::Hello).should be_true + expect(defined?(NamespacedSpecNamespace::Hello)).to be_truthy end it "required an included file" do - defined?(OtherNamespace::SomeEnum).should be_true + expect(defined?(OtherNamespace::SomeEnum)).to be_truthy end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/http_client_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/http_client_spec.rb index 5e8da24b2..df472ab33 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/http_client_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/http_client_spec.rb @@ -25,28 +25,32 @@ describe 'Thrift::HTTPClientTransport' do before(:each) do @client = Thrift::HTTPClientTransport.new("http://my.domain.com/path/to/service?param=value") end + + it "should provide a reasonable to_s" do + @client.to_s == "http://my.domain.com/path/to/service?param=value" + end it "should always be open" do - @client.should be_open + expect(@client).to be_open @client.close - @client.should be_open + expect(@client).to be_open end it "should post via HTTP and return the results" do @client.write "a test" @client.write " frame" - Net::HTTP.should_receive(:new).with("my.domain.com", 80).and_return do - mock("Net::HTTP").tap do |http| - http.should_receive(:use_ssl=).with(false) - http.should_receive(:post).with("/path/to/service?param=value", "a test frame", {"Content-Type"=>"application/x-thrift"}).and_return do - mock("Net::HTTPOK").tap do |response| - response.should_receive(:body).and_return "data" + expect(Net::HTTP).to receive(:new).with("my.domain.com", 80) do + double("Net::HTTP").tap do |http| + expect(http).to 
receive(:use_ssl=).with(false) + expect(http).to receive(:post).with("/path/to/service?param=value", "a test frame", {"Content-Type"=>"application/x-thrift"}) do + double("Net::HTTPOK").tap do |response| + expect(response).to receive(:body).and_return "data" end end end end @client.flush - @client.read(10).should == "data" + expect(@client.read(10)).to eq("data") end it "should send custom headers if defined" do @@ -55,12 +59,12 @@ describe 'Thrift::HTTPClientTransport' do headers = {"Content-Type"=>"application/x-thrift"}.merge(custom_headers) @client.add_headers(custom_headers) - Net::HTTP.should_receive(:new).with("my.domain.com", 80).and_return do - mock("Net::HTTP").tap do |http| - http.should_receive(:use_ssl=).with(false) - http.should_receive(:post).with("/path/to/service?param=value", "test", headers).and_return do - mock("Net::HTTPOK").tap do |response| - response.should_receive(:body).and_return "data" + expect(Net::HTTP).to receive(:new).with("my.domain.com", 80) do + double("Net::HTTP").tap do |http| + expect(http).to receive(:use_ssl=).with(false) + expect(http).to receive(:post).with("/path/to/service?param=value", "test", headers) do + double("Net::HTTPOK").tap do |response| + expect(response).to receive(:body).and_return "data" end end end @@ -71,15 +75,15 @@ describe 'Thrift::HTTPClientTransport' do it 'should reset the outbuf on HTTP failures' do @client.write "test" - Net::HTTP.should_receive(:new).with("my.domain.com", 80).and_return do - mock("Net::HTTP").tap do |http| - http.should_receive(:use_ssl=).with(false) - http.should_receive(:post).with("/path/to/service?param=value", "test", {"Content-Type"=>"application/x-thrift"}) { raise Net::ReadTimeout } + expect(Net::HTTP).to receive(:new).with("my.domain.com", 80) do + double("Net::HTTP").tap do |http| + expect(http).to receive(:use_ssl=).with(false) + expect(http).to receive(:post).with("/path/to/service?param=value", "test", {"Content-Type"=>"application/x-thrift"}) { raise Net::ReadTimeout 
} end end @client.flush rescue - @client.instance_variable_get(:@outbuf).should eq(Thrift::Bytes.empty_byte_buffer) + expect(@client.instance_variable_get(:@outbuf)).to eq(Thrift::Bytes.empty_byte_buffer) end end @@ -95,20 +99,20 @@ describe 'Thrift::HTTPClientTransport' do client.write "test" - Net::HTTP.should_receive(:new).with("my.domain.com", 443).and_return do - mock("Net::HTTP").tap do |http| - http.should_receive(:use_ssl=).with(true) - http.should_receive(:verify_mode=).with(OpenSSL::SSL::VERIFY_PEER) - http.should_receive(:post).with(@service_path, "test", - "Content-Type" => "application/x-thrift").and_return do - mock("Net::HTTPOK").tap do |response| - response.should_receive(:body).and_return "data" + expect(Net::HTTP).to receive(:new).with("my.domain.com", 443) do + double("Net::HTTP").tap do |http| + expect(http).to receive(:use_ssl=).with(true) + expect(http).to receive(:verify_mode=).with(OpenSSL::SSL::VERIFY_PEER) + expect(http).to receive(:post).with(@service_path, "test", + "Content-Type" => "application/x-thrift") do + double("Net::HTTPOK").tap do |response| + expect(response).to receive(:body).and_return "data" end end end end client.flush - client.read(4).should == "data" + expect(client.read(4)).to eq("data") end it "should set SSL verify mode when specified" do @@ -116,20 +120,20 @@ describe 'Thrift::HTTPClientTransport' do :ssl_verify_mode => OpenSSL::SSL::VERIFY_NONE) client.write "test" - Net::HTTP.should_receive(:new).with("my.domain.com", 443).and_return do - mock("Net::HTTP").tap do |http| - http.should_receive(:use_ssl=).with(true) - http.should_receive(:verify_mode=).with(OpenSSL::SSL::VERIFY_NONE) - http.should_receive(:post).with(@service_path, "test", - "Content-Type" => "application/x-thrift").and_return do - mock("Net::HTTPOK").tap do |response| - response.should_receive(:body).and_return "data" + expect(Net::HTTP).to receive(:new).with("my.domain.com", 443) do + double("Net::HTTP").tap do |http| + expect(http).to 
receive(:use_ssl=).with(true) + expect(http).to receive(:verify_mode=).with(OpenSSL::SSL::VERIFY_NONE) + expect(http).to receive(:post).with(@service_path, "test", + "Content-Type" => "application/x-thrift") do + double("Net::HTTPOK").tap do |response| + expect(response).to receive(:body).and_return "data" end end end end client.flush - client.read(4).should == "data" + expect(client.read(4)).to eq("data") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/json_protocol_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/json_protocol_spec.rb index b6b46bff3..fe1af7bb2 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/json_protocol_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/json_protocol_spec.rb @@ -30,257 +30,257 @@ describe 'JsonProtocol' do it "should write json escaped char" do @prot.write_json_escape_char("\n") - @trans.read(@trans.available).should == '\u000a' + expect(@trans.read(@trans.available)).to eq('\u000a') @prot.write_json_escape_char(" ") - @trans.read(@trans.available).should == '\u0020' + expect(@trans.read(@trans.available)).to eq('\u0020') end it "should write json char" do @prot.write_json_char("\n") - @trans.read(@trans.available).should == '\\n' + expect(@trans.read(@trans.available)).to eq('\\n') @prot.write_json_char(" ") - @trans.read(@trans.available).should == ' ' + expect(@trans.read(@trans.available)).to eq(' ') @prot.write_json_char("\\") - @trans.read(@trans.available).should == "\\\\" + expect(@trans.read(@trans.available)).to eq("\\\\") @prot.write_json_char("@") - @trans.read(@trans.available).should == '@' + expect(@trans.read(@trans.available)).to eq('@') end it "should write json string" do @prot.write_json_string("this is a \\ json\nstring") - @trans.read(@trans.available).should == "\"this is a \\\\ json\\nstring\"" + expect(@trans.read(@trans.available)).to eq("\"this is a \\\\ json\\nstring\"") end it "should write json base64" do @prot.write_json_base64("this is a base64 
string") - @trans.read(@trans.available).should == "\"dGhpcyBpcyBhIGJhc2U2NCBzdHJpbmc=\"" + expect(@trans.read(@trans.available)).to eq("\"dGhpcyBpcyBhIGJhc2U2NCBzdHJpbmc=\"") end it "should write json integer" do @prot.write_json_integer(45) - @trans.read(@trans.available).should == "45" + expect(@trans.read(@trans.available)).to eq("45") @prot.write_json_integer(33000) - @trans.read(@trans.available).should == "33000" + expect(@trans.read(@trans.available)).to eq("33000") @prot.write_json_integer(3000000000) - @trans.read(@trans.available).should == "3000000000" + expect(@trans.read(@trans.available)).to eq("3000000000") @prot.write_json_integer(6000000000) - @trans.read(@trans.available).should == "6000000000" + expect(@trans.read(@trans.available)).to eq("6000000000") end it "should write json double" do @prot.write_json_double(12.3) - @trans.read(@trans.available).should == "12.3" + expect(@trans.read(@trans.available)).to eq("12.3") @prot.write_json_double(-3.21) - @trans.read(@trans.available).should == "-3.21" + expect(@trans.read(@trans.available)).to eq("-3.21") @prot.write_json_double(((+1.0/0.0)/(+1.0/0.0))) - @trans.read(@trans.available).should == "\"NaN\"" + expect(@trans.read(@trans.available)).to eq("\"NaN\"") @prot.write_json_double((+1.0/0.0)) - @trans.read(@trans.available).should == "\"Infinity\"" + expect(@trans.read(@trans.available)).to eq("\"Infinity\"") @prot.write_json_double((-1.0/0.0)) - @trans.read(@trans.available).should == "\"-Infinity\"" + expect(@trans.read(@trans.available)).to eq("\"-Infinity\"") end it "should write json object start" do @prot.write_json_object_start - @trans.read(@trans.available).should == "{" + expect(@trans.read(@trans.available)).to eq("{") end it "should write json object end" do @prot.write_json_object_end - @trans.read(@trans.available).should == "}" + expect(@trans.read(@trans.available)).to eq("}") end it "should write json array start" do @prot.write_json_array_start - 
@trans.read(@trans.available).should == "[" + expect(@trans.read(@trans.available)).to eq("[") end it "should write json array end" do @prot.write_json_array_end - @trans.read(@trans.available).should == "]" + expect(@trans.read(@trans.available)).to eq("]") end it "should write message begin" do @prot.write_message_begin("name", 12, 32) - @trans.read(@trans.available).should == "[1,\"name\",12,32" + expect(@trans.read(@trans.available)).to eq("[1,\"name\",12,32") end it "should write message end" do @prot.write_message_end - @trans.read(@trans.available).should == "]" + expect(@trans.read(@trans.available)).to eq("]") end it "should write struct begin" do @prot.write_struct_begin("name") - @trans.read(@trans.available).should == "{" + expect(@trans.read(@trans.available)).to eq("{") end it "should write struct end" do @prot.write_struct_end - @trans.read(@trans.available).should == "}" + expect(@trans.read(@trans.available)).to eq("}") end it "should write field begin" do @prot.write_field_begin("name", Thrift::Types::STRUCT, 32) - @trans.read(@trans.available).should == "32{\"rec\"" + expect(@trans.read(@trans.available)).to eq("32{\"rec\"") end it "should write field end" do @prot.write_field_end - @trans.read(@trans.available).should == "}" + expect(@trans.read(@trans.available)).to eq("}") end it "should write field stop" do @prot.write_field_stop - @trans.read(@trans.available).should == "" + expect(@trans.read(@trans.available)).to eq("") end it "should write map begin" do @prot.write_map_begin(Thrift::Types::STRUCT, Thrift::Types::LIST, 32) - @trans.read(@trans.available).should == "[\"rec\",\"lst\",32,{" + expect(@trans.read(@trans.available)).to eq("[\"rec\",\"lst\",32,{") end it "should write map end" do @prot.write_map_end - @trans.read(@trans.available).should == "}]" + expect(@trans.read(@trans.available)).to eq("}]") end it "should write list begin" do @prot.write_list_begin(Thrift::Types::STRUCT, 32) - @trans.read(@trans.available).should == 
"[\"rec\",32" + expect(@trans.read(@trans.available)).to eq("[\"rec\",32") end it "should write list end" do @prot.write_list_end - @trans.read(@trans.available).should == "]" + expect(@trans.read(@trans.available)).to eq("]") end it "should write set begin" do @prot.write_set_begin(Thrift::Types::STRUCT, 32) - @trans.read(@trans.available).should == "[\"rec\",32" + expect(@trans.read(@trans.available)).to eq("[\"rec\",32") end it "should write set end" do @prot.write_set_end - @trans.read(@trans.available).should == "]" + expect(@trans.read(@trans.available)).to eq("]") end it "should write bool" do @prot.write_bool(true) - @trans.read(@trans.available).should == "1" + expect(@trans.read(@trans.available)).to eq("1") @prot.write_bool(false) - @trans.read(@trans.available).should == "0" + expect(@trans.read(@trans.available)).to eq("0") end it "should write byte" do @prot.write_byte(100) - @trans.read(@trans.available).should == "100" + expect(@trans.read(@trans.available)).to eq("100") end it "should write i16" do @prot.write_i16(1000) - @trans.read(@trans.available).should == "1000" + expect(@trans.read(@trans.available)).to eq("1000") end it "should write i32" do @prot.write_i32(3000000000) - @trans.read(@trans.available).should == "3000000000" + expect(@trans.read(@trans.available)).to eq("3000000000") end it "should write i64" do @prot.write_i64(6000000000) - @trans.read(@trans.available).should == "6000000000" + expect(@trans.read(@trans.available)).to eq("6000000000") end it "should write double" do @prot.write_double(1.23) - @trans.read(@trans.available).should == "1.23" + expect(@trans.read(@trans.available)).to eq("1.23") @prot.write_double(-32.1) - @trans.read(@trans.available).should == "-32.1" + expect(@trans.read(@trans.available)).to eq("-32.1") @prot.write_double(((+1.0/0.0)/(+1.0/0.0))) - @trans.read(@trans.available).should == "\"NaN\"" + expect(@trans.read(@trans.available)).to eq("\"NaN\"") @prot.write_double((+1.0/0.0)) - 
@trans.read(@trans.available).should == "\"Infinity\"" + expect(@trans.read(@trans.available)).to eq("\"Infinity\"") @prot.write_double((-1.0/0.0)) - @trans.read(@trans.available).should == "\"-Infinity\"" + expect(@trans.read(@trans.available)).to eq("\"-Infinity\"") end if RUBY_VERSION >= '1.9' it 'should write string' do @prot.write_string('this is a test string') a = @trans.read(@trans.available) - a.should == '"this is a test string"'.force_encoding(Encoding::BINARY) - a.encoding.should == Encoding::BINARY + expect(a).to eq('"this is a test string"'.force_encoding(Encoding::BINARY)) + expect(a.encoding).to eq(Encoding::BINARY) end it 'should write string with unicode characters' do @prot.write_string("this is a test string with unicode characters: \u20AC \u20AD") a = @trans.read(@trans.available) - a.should == "\"this is a test string with unicode characters: \u20AC \u20AD\"".force_encoding(Encoding::BINARY) - a.encoding.should == Encoding::BINARY + expect(a).to eq("\"this is a test string with unicode characters: \u20AC \u20AD\"".force_encoding(Encoding::BINARY)) + expect(a.encoding).to eq(Encoding::BINARY) end else it 'should write string' do @prot.write_string('this is a test string') - @trans.read(@trans.available).should == '"this is a test string"' + expect(@trans.read(@trans.available)).to eq('"this is a test string"') end end it "should write binary" do @prot.write_binary("this is a base64 string") - @trans.read(@trans.available).should == "\"dGhpcyBpcyBhIGJhc2U2NCBzdHJpbmc=\"" + expect(@trans.read(@trans.available)).to eq("\"dGhpcyBpcyBhIGJhc2U2NCBzdHJpbmc=\"") end it "should write long binary" do @prot.write_binary((0...256).to_a.pack('C*')) - @trans.read(@trans.available).should == 
"\"AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==\"" + expect(@trans.read(@trans.available)).to eq("\"AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==\"") end it "should get type name for type id" do expect {@prot.get_type_name_for_type_id(Thrift::Types::STOP)}.to raise_error(NotImplementedError) expect {@prot.get_type_name_for_type_id(Thrift::Types::VOID)}.to raise_error(NotImplementedError) - @prot.get_type_name_for_type_id(Thrift::Types::BOOL).should == "tf" - @prot.get_type_name_for_type_id(Thrift::Types::BYTE).should == "i8" - @prot.get_type_name_for_type_id(Thrift::Types::DOUBLE).should == "dbl" - @prot.get_type_name_for_type_id(Thrift::Types::I16).should == "i16" - @prot.get_type_name_for_type_id(Thrift::Types::I32).should == "i32" - @prot.get_type_name_for_type_id(Thrift::Types::I64).should == "i64" - @prot.get_type_name_for_type_id(Thrift::Types::STRING).should == "str" - @prot.get_type_name_for_type_id(Thrift::Types::STRUCT).should == "rec" - @prot.get_type_name_for_type_id(Thrift::Types::MAP).should == "map" - @prot.get_type_name_for_type_id(Thrift::Types::SET).should == "set" - @prot.get_type_name_for_type_id(Thrift::Types::LIST).should == "lst" + expect(@prot.get_type_name_for_type_id(Thrift::Types::BOOL)).to eq("tf") + expect(@prot.get_type_name_for_type_id(Thrift::Types::BYTE)).to eq("i8") + expect(@prot.get_type_name_for_type_id(Thrift::Types::DOUBLE)).to eq("dbl") + 
expect(@prot.get_type_name_for_type_id(Thrift::Types::I16)).to eq("i16") + expect(@prot.get_type_name_for_type_id(Thrift::Types::I32)).to eq("i32") + expect(@prot.get_type_name_for_type_id(Thrift::Types::I64)).to eq("i64") + expect(@prot.get_type_name_for_type_id(Thrift::Types::STRING)).to eq("str") + expect(@prot.get_type_name_for_type_id(Thrift::Types::STRUCT)).to eq("rec") + expect(@prot.get_type_name_for_type_id(Thrift::Types::MAP)).to eq("map") + expect(@prot.get_type_name_for_type_id(Thrift::Types::SET)).to eq("set") + expect(@prot.get_type_name_for_type_id(Thrift::Types::LIST)).to eq("lst") end it "should get type id for type name" do expect {@prot.get_type_id_for_type_name("pp")}.to raise_error(NotImplementedError) - @prot.get_type_id_for_type_name("tf").should == Thrift::Types::BOOL - @prot.get_type_id_for_type_name("i8").should == Thrift::Types::BYTE - @prot.get_type_id_for_type_name("dbl").should == Thrift::Types::DOUBLE - @prot.get_type_id_for_type_name("i16").should == Thrift::Types::I16 - @prot.get_type_id_for_type_name("i32").should == Thrift::Types::I32 - @prot.get_type_id_for_type_name("i64").should == Thrift::Types::I64 - @prot.get_type_id_for_type_name("str").should == Thrift::Types::STRING - @prot.get_type_id_for_type_name("rec").should == Thrift::Types::STRUCT - @prot.get_type_id_for_type_name("map").should == Thrift::Types::MAP - @prot.get_type_id_for_type_name("set").should == Thrift::Types::SET - @prot.get_type_id_for_type_name("lst").should == Thrift::Types::LIST + expect(@prot.get_type_id_for_type_name("tf")).to eq(Thrift::Types::BOOL) + expect(@prot.get_type_id_for_type_name("i8")).to eq(Thrift::Types::BYTE) + expect(@prot.get_type_id_for_type_name("dbl")).to eq(Thrift::Types::DOUBLE) + expect(@prot.get_type_id_for_type_name("i16")).to eq(Thrift::Types::I16) + expect(@prot.get_type_id_for_type_name("i32")).to eq(Thrift::Types::I32) + expect(@prot.get_type_id_for_type_name("i64")).to eq(Thrift::Types::I64) + 
expect(@prot.get_type_id_for_type_name("str")).to eq(Thrift::Types::STRING) + expect(@prot.get_type_id_for_type_name("rec")).to eq(Thrift::Types::STRUCT) + expect(@prot.get_type_id_for_type_name("map")).to eq(Thrift::Types::MAP) + expect(@prot.get_type_id_for_type_name("set")).to eq(Thrift::Types::SET) + expect(@prot.get_type_id_for_type_name("lst")).to eq(Thrift::Types::LIST) end it "should read json syntax char" do @@ -292,31 +292,31 @@ describe 'JsonProtocol' do it "should read json escape char" do @trans.write('0054') - @prot.read_json_escape_char.should == 'T' + expect(@prot.read_json_escape_char).to eq('T') @trans.write("\"\\\"\"") - @prot.read_json_string(false).should == "\"" + expect(@prot.read_json_string(false)).to eq("\"") @trans.write("\"\\\\\"") - @prot.read_json_string(false).should == "\\" + expect(@prot.read_json_string(false)).to eq("\\") @trans.write("\"\\/\"") - @prot.read_json_string(false).should == "\/" + expect(@prot.read_json_string(false)).to eq("\/") @trans.write("\"\\b\"") - @prot.read_json_string(false).should == "\b" + expect(@prot.read_json_string(false)).to eq("\b") @trans.write("\"\\f\"") - @prot.read_json_string(false).should == "\f" + expect(@prot.read_json_string(false)).to eq("\f") @trans.write("\"\\n\"") - @prot.read_json_string(false).should == "\n" + expect(@prot.read_json_string(false)).to eq("\n") @trans.write("\"\\r\"") - @prot.read_json_string(false).should == "\r" + expect(@prot.read_json_string(false)).to eq("\r") @trans.write("\"\\t\"") - @prot.read_json_string(false).should == "\t" + expect(@prot.read_json_string(false)).to eq("\t") end it "should read json string" do @@ -324,36 +324,36 @@ describe 'JsonProtocol' do expect {@prot.read_json_string(false)}.to raise_error(Thrift::ProtocolException) @trans.write("\"this is a test string\"") - @prot.read_json_string.should == "this is a test string" + expect(@prot.read_json_string).to eq("this is a test string") end it "should read json base64" do 
@trans.write("\"dGhpcyBpcyBhIHRlc3Qgc3RyaW5n\"") - @prot.read_json_base64.should == "this is a test string" + expect(@prot.read_json_base64).to eq("this is a test string") end it "should is json numeric" do - @prot.is_json_numeric("A").should == false - @prot.is_json_numeric("+").should == true - @prot.is_json_numeric("-").should == true - @prot.is_json_numeric(".").should == true - @prot.is_json_numeric("0").should == true - @prot.is_json_numeric("1").should == true - @prot.is_json_numeric("2").should == true - @prot.is_json_numeric("3").should == true - @prot.is_json_numeric("4").should == true - @prot.is_json_numeric("5").should == true - @prot.is_json_numeric("6").should == true - @prot.is_json_numeric("7").should == true - @prot.is_json_numeric("8").should == true - @prot.is_json_numeric("9").should == true - @prot.is_json_numeric("E").should == true - @prot.is_json_numeric("e").should == true + expect(@prot.is_json_numeric("A")).to eq(false) + expect(@prot.is_json_numeric("+")).to eq(true) + expect(@prot.is_json_numeric("-")).to eq(true) + expect(@prot.is_json_numeric(".")).to eq(true) + expect(@prot.is_json_numeric("0")).to eq(true) + expect(@prot.is_json_numeric("1")).to eq(true) + expect(@prot.is_json_numeric("2")).to eq(true) + expect(@prot.is_json_numeric("3")).to eq(true) + expect(@prot.is_json_numeric("4")).to eq(true) + expect(@prot.is_json_numeric("5")).to eq(true) + expect(@prot.is_json_numeric("6")).to eq(true) + expect(@prot.is_json_numeric("7")).to eq(true) + expect(@prot.is_json_numeric("8")).to eq(true) + expect(@prot.is_json_numeric("9")).to eq(true) + expect(@prot.is_json_numeric("E")).to eq(true) + expect(@prot.is_json_numeric("e")).to eq(true) end it "should read json numeric chars" do @trans.write("1.453E45T") - @prot.read_json_numeric_chars.should == "1.453E45" + expect(@prot.read_json_numeric_chars).to eq("1.453E45") end it "should read json integer" do @@ -362,7 +362,7 @@ describe 'JsonProtocol' do @prot.read_string 
@trans.write("1453T") - @prot.read_json_integer.should == 1453 + expect(@prot.read_json_integer).to eq(1453) end it "should read json double" do @@ -374,37 +374,37 @@ describe 'JsonProtocol' do expect {@prot.read_json_double}.to raise_error(Thrift::ProtocolException) @trans.write("1.453e01\"\"") - @prot.read_json_double.should == 14.53 + expect(@prot.read_json_double).to eq(14.53) @prot.read_string @trans.write("\"NaN\"") - @prot.read_json_double.nan?.should == true + expect(@prot.read_json_double.nan?).to eq(true) @trans.write("\"Infinity\"") - @prot.read_json_double.should == +1.0/0.0 + expect(@prot.read_json_double).to eq(+1.0/0.0) @trans.write("\"-Infinity\"") - @prot.read_json_double.should == -1.0/0.0 + expect(@prot.read_json_double).to eq(-1.0/0.0) end it "should read json object start" do @trans.write("{") - @prot.read_json_object_start.should == nil + expect(@prot.read_json_object_start).to eq(nil) end it "should read json object end" do @trans.write("}") - @prot.read_json_object_end.should == nil + expect(@prot.read_json_object_end).to eq(nil) end it "should read json array start" do @trans.write("[") - @prot.read_json_array_start.should == nil + expect(@prot.read_json_array_start).to eq(nil) end it "should read json array end" do @trans.write("]") - @prot.read_json_array_end.should == nil + expect(@prot.read_json_array_end).to eq(nil) end it "should read_message_begin" do @@ -412,133 +412,141 @@ describe 'JsonProtocol' do expect {@prot.read_message_begin}.to raise_error(Thrift::ProtocolException) @trans.write("[1,\"name\",12,32\"\"") - @prot.read_message_begin.should == ["name", 12, 32] + expect(@prot.read_message_begin).to eq(["name", 12, 32]) end it "should read message end" do @trans.write("]") - @prot.read_message_end.should == nil + expect(@prot.read_message_end).to eq(nil) end it "should read struct begin" do @trans.write("{") - @prot.read_struct_begin.should == nil + expect(@prot.read_struct_begin).to eq(nil) end it "should read struct end" do 
@trans.write("}") - @prot.read_struct_end.should == nil + expect(@prot.read_struct_end).to eq(nil) end it "should read field begin" do @trans.write("1{\"rec\"") - @prot.read_field_begin.should == [nil, 12, 1] + expect(@prot.read_field_begin).to eq([nil, 12, 1]) end it "should read field end" do @trans.write("}") - @prot.read_field_end.should == nil + expect(@prot.read_field_end).to eq(nil) end it "should read map begin" do @trans.write("[\"rec\",\"lst\",2,{") - @prot.read_map_begin.should == [12, 15, 2] + expect(@prot.read_map_begin).to eq([12, 15, 2]) end it "should read map end" do @trans.write("}]") - @prot.read_map_end.should == nil + expect(@prot.read_map_end).to eq(nil) end it "should read list begin" do @trans.write("[\"rec\",2\"\"") - @prot.read_list_begin.should == [12, 2] + expect(@prot.read_list_begin).to eq([12, 2]) end it "should read list end" do @trans.write("]") - @prot.read_list_end.should == nil + expect(@prot.read_list_end).to eq(nil) end it "should read set begin" do @trans.write("[\"rec\",2\"\"") - @prot.read_set_begin.should == [12, 2] + expect(@prot.read_set_begin).to eq([12, 2]) end it "should read set end" do @trans.write("]") - @prot.read_set_end.should == nil + expect(@prot.read_set_end).to eq(nil) end it "should read bool" do @trans.write("0\"\"") - @prot.read_bool.should == false + expect(@prot.read_bool).to eq(false) @prot.read_string @trans.write("1\"\"") - @prot.read_bool.should == true + expect(@prot.read_bool).to eq(true) end it "should read byte" do @trans.write("60\"\"") - @prot.read_byte.should == 60 + expect(@prot.read_byte).to eq(60) end it "should read i16" do @trans.write("1000\"\"") - @prot.read_i16.should == 1000 + expect(@prot.read_i16).to eq(1000) end it "should read i32" do @trans.write("3000000000\"\"") - @prot.read_i32.should == 3000000000 + expect(@prot.read_i32).to eq(3000000000) end it "should read i64" do @trans.write("6000000000\"\"") - @prot.read_i64.should == 6000000000 + expect(@prot.read_i64).to 
eq(6000000000) end it "should read double" do @trans.write("12.23\"\"") - @prot.read_double.should == 12.23 + expect(@prot.read_double).to eq(12.23) end if RUBY_VERSION >= '1.9' it 'should read string' do @trans.write('"this is a test string"'.force_encoding(Encoding::BINARY)) a = @prot.read_string - a.should == 'this is a test string' - a.encoding.should == Encoding::UTF_8 + expect(a).to eq('this is a test string') + expect(a.encoding).to eq(Encoding::UTF_8) end it 'should read string with unicode characters' do @trans.write('"this is a test string with unicode characters: \u20AC \u20AD"'.force_encoding(Encoding::BINARY)) a = @prot.read_string - a.should == "this is a test string with unicode characters: \u20AC \u20AD" - a.encoding.should == Encoding::UTF_8 + expect(a).to eq("this is a test string with unicode characters: \u20AC \u20AD") + expect(a.encoding).to eq(Encoding::UTF_8) end else it 'should read string' do @trans.write('"this is a test string"') - @prot.read_string.should == 'this is a test string' + expect(@prot.read_string).to eq('this is a test string') end end it "should read binary" do @trans.write("\"dGhpcyBpcyBhIHRlc3Qgc3RyaW5n\"") - @prot.read_binary.should == "this is a test string" + expect(@prot.read_binary).to eq("this is a test string") end it "should read long binary" do @trans.write("\"AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==\"") - @prot.read_binary.bytes.to_a.should == (0...256).to_a + expect(@prot.read_binary.bytes.to_a).to eq((0...256).to_a) + end + + it "should provide a reasonable to_s" do + expect(@prot.to_s).to eq("json(memory)") end end describe Thrift::JsonProtocolFactory do it "should create a JsonProtocol" do - 
Thrift::JsonProtocolFactory.new.get_protocol(mock("MockTransport")).should be_instance_of(Thrift::JsonProtocol) + expect(Thrift::JsonProtocolFactory.new.get_protocol(double("MockTransport"))).to be_instance_of(Thrift::JsonProtocol) + end + + it "should provide a reasonable to_s" do + expect(Thrift::JsonProtocolFactory.new.to_s).to eq("json") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/namespaced_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/namespaced_spec.rb index 31379d964..4d6d369e5 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/namespaced_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/namespaced_spec.rb @@ -32,7 +32,7 @@ describe 'namespaced generation' do "other_namespace/referenced_constants.rb", "other_namespace/referenced_types.rb" ].each do |name| - File.exist?(File.join(prefix, name)).should be_true + expect(File.exist?(File.join(prefix, name))).to be_truthy end end @@ -44,20 +44,20 @@ describe 'namespaced generation' do "referenced_constants.rb", "referenced_types.rb" ].each do |name| - File.exist?(File.join(prefix, name)).should_not be_true + expect(File.exist?(File.join(prefix, name))).not_to be_truthy end end it "has a service class in the right place" do - defined?(NamespacedSpecNamespace::NamespacedNonblockingService).should be_true + expect(defined?(NamespacedSpecNamespace::NamespacedNonblockingService)).to be_truthy end it "has a struct in the right place" do - defined?(NamespacedSpecNamespace::Hello).should be_true + expect(defined?(NamespacedSpecNamespace::Hello)).to be_truthy end it "required an included file" do - defined?(OtherNamespace::SomeEnum).should be_true + expect(defined?(OtherNamespace::SomeEnum)).to be_truthy end it "extended a service" do diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/nonblocking_server_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/nonblocking_server_spec.rb index 712cf45c2..613d88390 100644 --- 
a/vendor/git.apache.org/thrift.git/lib/rb/spec/nonblocking_server_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/nonblocking_server_spec.rb @@ -176,8 +176,8 @@ describe 'NonblockingServer' do it "should handle basic message passing" do client = setup_client - client.greeting(true).should == SpecNamespace::Hello.new - client.greeting(false).should == SpecNamespace::Hello.new(:greeting => 'Aloha!') + expect(client.greeting(true)).to eq(SpecNamespace::Hello.new) + expect(client.greeting(false)).to eq(SpecNamespace::Hello.new(:greeting => 'Aloha!')) @server.shutdown end @@ -195,7 +195,7 @@ describe 'NonblockingServer' do end 4.times { trans_queue.pop } setup_client.unblock(4) - 4.times { queue.pop.should be_true } + 4.times { expect(queue.pop).to be_truthy } @server.shutdown end @@ -212,15 +212,15 @@ describe 'NonblockingServer' do queues[4] << :hello queues[5] << :hello queues[6] << :hello - 3.times { result.pop.should == SpecNamespace::Hello.new } - client.greeting(true).should == SpecNamespace::Hello.new + 3.times { expect(result.pop).to eq(SpecNamespace::Hello.new) } + expect(client.greeting(true)).to eq(SpecNamespace::Hello.new) queues[5] << [:unblock, 4] - 4.times { result.pop.should be_true } + 4.times { expect(result.pop).to be_truthy } queues[2] << :hello - result.pop.should == SpecNamespace::Hello.new - client.greeting(false).should == SpecNamespace::Hello.new(:greeting => 'Aloha!') + expect(result.pop).to eq(SpecNamespace::Hello.new) + expect(client.greeting(false)).to eq(SpecNamespace::Hello.new(:greeting => 'Aloha!')) 7.times { queues.shift << :exit } - client.greeting(true).should == SpecNamespace::Hello.new + expect(client.greeting(true)).to eq(SpecNamespace::Hello.new) @server.shutdown end @@ -229,7 +229,7 @@ describe 'NonblockingServer' do client = setup_client client.greeting(false) # force a message pass @server.shutdown - @server_thread.join(2).should be_an_instance_of(Thread) + expect(@server_thread.join(2)).to be_an_instance_of(Thread) 
end it "should continue processing active messages when shutting down" do @@ -238,8 +238,8 @@ describe 'NonblockingServer' do client << :sleep sleep 0.1 # give the server time to start processing the client's message @server.shutdown - @server_thread.join(2).should be_an_instance_of(Thread) - result.pop.should == :slept + expect(@server_thread.join(2)).to be_an_instance_of(Thread) + expect(result.pop).to eq(:slept) end it "should kill active messages when they don't expire while shutting down" do @@ -249,15 +249,15 @@ describe 'NonblockingServer' do sleep 0.1 # start processing the client's message @server.shutdown(1) @catch_exceptions = true - @server_thread.join(3).should_not be_nil - result.should be_empty + expect(@server_thread.join(3)).not_to be_nil + expect(result).to be_empty end it "should allow shutting down in response to a message" do client = setup_client - client.greeting(true).should == SpecNamespace::Hello.new + expect(client.greeting(true)).to eq(SpecNamespace::Hello.new) client.shutdown - @server_thread.join(2).should_not be_nil + expect(@server_thread.join(2)).not_to be_nil end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/processor_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/processor_spec.rb index 989f5cca1..d30553f55 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/processor_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/processor_spec.rb @@ -27,52 +27,52 @@ describe 'Processor' do describe Thrift::Processor do before(:each) do - @processor = ProcessorSpec.new(mock("MockHandler")) - @prot = mock("MockProtocol") + @processor = ProcessorSpec.new(double("MockHandler")) + @prot = double("MockProtocol") end def mock_trans(obj) - obj.should_receive(:trans).ordered.and_return do - mock("trans").tap do |trans| - trans.should_receive(:flush).ordered + expect(obj).to receive(:trans).ordered do + double("trans").tap do |trans| + expect(trans).to receive(:flush).ordered end end end it "should call 
process_ when it receives that message" do - @prot.should_receive(:read_message_begin).ordered.and_return ['testMessage', Thrift::MessageTypes::CALL, 17] - @processor.should_receive(:process_testMessage).with(17, @prot, @prot).ordered - @processor.process(@prot, @prot).should == true + expect(@prot).to receive(:read_message_begin).ordered.and_return ['testMessage', Thrift::MessageTypes::CALL, 17] + expect(@processor).to receive(:process_testMessage).with(17, @prot, @prot).ordered + expect(@processor.process(@prot, @prot)).to eq(true) end it "should raise an ApplicationException when the received message cannot be processed" do - @prot.should_receive(:read_message_begin).ordered.and_return ['testMessage', Thrift::MessageTypes::CALL, 4] - @prot.should_receive(:skip).with(Thrift::Types::STRUCT).ordered - @prot.should_receive(:read_message_end).ordered - @prot.should_receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::EXCEPTION, 4).ordered - e = mock(Thrift::ApplicationException) - e.should_receive(:write).with(@prot).ordered - Thrift::ApplicationException.should_receive(:new).with(Thrift::ApplicationException::UNKNOWN_METHOD, "Unknown function testMessage").and_return(e) - @prot.should_receive(:write_message_end).ordered + expect(@prot).to receive(:read_message_begin).ordered.and_return ['testMessage', Thrift::MessageTypes::CALL, 4] + expect(@prot).to receive(:skip).with(Thrift::Types::STRUCT).ordered + expect(@prot).to receive(:read_message_end).ordered + expect(@prot).to receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::EXCEPTION, 4).ordered + e = double(Thrift::ApplicationException) + expect(e).to receive(:write).with(@prot).ordered + expect(Thrift::ApplicationException).to receive(:new).with(Thrift::ApplicationException::UNKNOWN_METHOD, "Unknown function testMessage").and_return(e) + expect(@prot).to receive(:write_message_end).ordered mock_trans(@prot) @processor.process(@prot, @prot) end it "should pass args off to the 
args class" do - args_class = mock("MockArgsClass") - args = mock("#").tap do |args| - args.should_receive(:read).with(@prot).ordered + args_class = double("MockArgsClass") + args = double("#").tap do |args| + expect(args).to receive(:read).with(@prot).ordered end - args_class.should_receive(:new).and_return args - @prot.should_receive(:read_message_end).ordered - @processor.read_args(@prot, args_class).should eql(args) + expect(args_class).to receive(:new).and_return args + expect(@prot).to receive(:read_message_end).ordered + expect(@processor.read_args(@prot, args_class)).to eql(args) end it "should write out a reply when asked" do - @prot.should_receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::REPLY, 23).ordered - result = mock("MockResult") - result.should_receive(:write).with(@prot).ordered - @prot.should_receive(:write_message_end).ordered + expect(@prot).to receive(:write_message_begin).with('testMessage', Thrift::MessageTypes::REPLY, 23).ordered + result = double("MockResult") + expect(result).to receive(:write).with(@prot).ordered + expect(@prot).to receive(:write_message_end).ordered mock_trans(@prot) @processor.write_result(result, @prot, 'testMessage', 23) end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/serializer_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/serializer_spec.rb index 599b454bb..2a7dc6db9 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/serializer_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/serializer_spec.rb @@ -25,19 +25,19 @@ describe 'Serializer' do it "should serialize structs to binary by default" do serializer = Thrift::Serializer.new(Thrift::BinaryProtocolAcceleratedFactory.new) data = serializer.serialize(SpecNamespace::Hello.new(:greeting => "'Ello guv'nor!")) - data.should == "\x0B\x00\x01\x00\x00\x00\x0E'Ello guv'nor!\x00" + expect(data).to eq("\x0B\x00\x01\x00\x00\x00\x0E'Ello guv'nor!\x00") end it "should serialize structs to the given protocol" do - 
protocol = Thrift::BaseProtocol.new(mock("transport")) - protocol.should_receive(:write_struct_begin).with("SpecNamespace::Hello") - protocol.should_receive(:write_field_begin).with("greeting", Thrift::Types::STRING, 1) - protocol.should_receive(:write_string).with("Good day") - protocol.should_receive(:write_field_end) - protocol.should_receive(:write_field_stop) - protocol.should_receive(:write_struct_end) - protocol_factory = mock("ProtocolFactory") - protocol_factory.stub!(:get_protocol).and_return(protocol) + protocol = Thrift::BaseProtocol.new(double("transport")) + expect(protocol).to receive(:write_struct_begin).with("SpecNamespace::Hello") + expect(protocol).to receive(:write_field_begin).with("greeting", Thrift::Types::STRING, 1) + expect(protocol).to receive(:write_string).with("Good day") + expect(protocol).to receive(:write_field_end) + expect(protocol).to receive(:write_field_stop) + expect(protocol).to receive(:write_struct_end) + protocol_factory = double("ProtocolFactory") + allow(protocol_factory).to receive(:get_protocol).and_return(protocol) serializer = Thrift::Serializer.new(protocol_factory) serializer.serialize(SpecNamespace::Hello.new(:greeting => "Good day")) end @@ -47,21 +47,21 @@ describe 'Serializer' do it "should deserialize structs from binary by default" do deserializer = Thrift::Deserializer.new data = "\x0B\x00\x01\x00\x00\x00\x0E'Ello guv'nor!\x00" - deserializer.deserialize(SpecNamespace::Hello.new, data).should == SpecNamespace::Hello.new(:greeting => "'Ello guv'nor!") + expect(deserializer.deserialize(SpecNamespace::Hello.new, data)).to eq(SpecNamespace::Hello.new(:greeting => "'Ello guv'nor!")) end it "should deserialize structs from the given protocol" do - protocol = Thrift::BaseProtocol.new(mock("transport")) - protocol.should_receive(:read_struct_begin).and_return("SpecNamespace::Hello") - protocol.should_receive(:read_field_begin).and_return(["greeting", Thrift::Types::STRING, 1], + protocol = 
Thrift::BaseProtocol.new(double("transport")) + expect(protocol).to receive(:read_struct_begin).and_return("SpecNamespace::Hello") + expect(protocol).to receive(:read_field_begin).and_return(["greeting", Thrift::Types::STRING, 1], [nil, Thrift::Types::STOP, 0]) - protocol.should_receive(:read_string).and_return("Good day") - protocol.should_receive(:read_field_end) - protocol.should_receive(:read_struct_end) - protocol_factory = mock("ProtocolFactory") - protocol_factory.stub!(:get_protocol).and_return(protocol) + expect(protocol).to receive(:read_string).and_return("Good day") + expect(protocol).to receive(:read_field_end) + expect(protocol).to receive(:read_struct_end) + protocol_factory = double("ProtocolFactory") + allow(protocol_factory).to receive(:get_protocol).and_return(protocol) deserializer = Thrift::Deserializer.new(protocol_factory) - deserializer.deserialize(SpecNamespace::Hello.new, "").should == SpecNamespace::Hello.new(:greeting => "Good day") + expect(deserializer.deserialize(SpecNamespace::Hello.new, "")).to eq(SpecNamespace::Hello.new(:greeting => "Good day")) end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/server_socket_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/server_socket_spec.rb index 1301d540f..ec9e55005 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/server_socket_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/server_socket_spec.rb @@ -28,52 +28,57 @@ describe 'Thrift::ServerSocket' do end it "should create a handle when calling listen" do - TCPServer.should_receive(:new).with(nil, 1234) + expect(TCPServer).to receive(:new).with(nil, 1234) @socket.listen end it "should accept an optional host argument" do @socket = Thrift::ServerSocket.new('localhost', 1234) - TCPServer.should_receive(:new).with('localhost', 1234) + expect(TCPServer).to receive(:new).with('localhost', 1234) + @socket.to_s == "server(localhost:1234)" @socket.listen end it "should create a Thrift::Socket to wrap 
accepted sockets" do - handle = mock("TCPServer") - TCPServer.should_receive(:new).with(nil, 1234).and_return(handle) + handle = double("TCPServer") + expect(TCPServer).to receive(:new).with(nil, 1234).and_return(handle) @socket.listen - sock = mock("sock") - handle.should_receive(:accept).and_return(sock) - trans = mock("Socket") - Thrift::Socket.should_receive(:new).and_return(trans) - trans.should_receive(:handle=).with(sock) - @socket.accept.should == trans + sock = double("sock") + expect(handle).to receive(:accept).and_return(sock) + trans = double("Socket") + expect(Thrift::Socket).to receive(:new).and_return(trans) + expect(trans).to receive(:handle=).with(sock) + expect(@socket.accept).to eq(trans) end it "should close the handle when closed" do - handle = mock("TCPServer", :closed? => false) - TCPServer.should_receive(:new).with(nil, 1234).and_return(handle) + handle = double("TCPServer", :closed? => false) + expect(TCPServer).to receive(:new).with(nil, 1234).and_return(handle) @socket.listen - handle.should_receive(:close) + expect(handle).to receive(:close) @socket.close end it "should return nil when accepting if there is no handle" do - @socket.accept.should be_nil + expect(@socket.accept).to be_nil end it "should return true for closed? when appropriate" do - handle = mock("TCPServer", :closed? => false) - TCPServer.stub!(:new).and_return(handle) + handle = double("TCPServer", :closed? 
=> false) + allow(TCPServer).to receive(:new).and_return(handle) @socket.listen - @socket.should_not be_closed - handle.stub!(:close) + expect(@socket).not_to be_closed + allow(handle).to receive(:close) @socket.close - @socket.should be_closed + expect(@socket).to be_closed @socket.listen - @socket.should_not be_closed - handle.stub!(:closed?).and_return(true) - @socket.should be_closed + expect(@socket).not_to be_closed + allow(handle).to receive(:closed?).and_return(true) + expect(@socket).to be_closed + end + + it "should provide a reasonable to_s" do + expect(@socket.to_s).to eq("socket(:1234)") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/server_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/server_spec.rb index 93b919568..ee58c7cb4 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/server_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/server_spec.rb @@ -21,88 +21,126 @@ require 'spec_helper' describe 'Server' do describe Thrift::BaseServer do - it "should default to BaseTransportFactory and BinaryProtocolFactory when not specified" do - server = Thrift::BaseServer.new(mock("Processor"), mock("BaseServerTransport")) - server.instance_variable_get(:'@transport_factory').should be_an_instance_of(Thrift::BaseTransportFactory) - server.instance_variable_get(:'@protocol_factory').should be_an_instance_of(Thrift::BinaryProtocolFactory) + before(:each) do + @processor = double("Processor") + @serverTrans = double("ServerTransport") + @trans = double("BaseTransport") + @prot = double("BaseProtocol") + @server = described_class.new(@processor, @serverTrans, @trans, @prot) end - # serve is a noop, so can't test that + it "should default to BaseTransportFactory and BinaryProtocolFactory when not specified" do + @server = Thrift::BaseServer.new(double("Processor"), double("BaseServerTransport")) + expect(@server.instance_variable_get(:'@transport_factory')).to be_an_instance_of(Thrift::BaseTransportFactory) + 
expect(@server.instance_variable_get(:'@protocol_factory')).to be_an_instance_of(Thrift::BinaryProtocolFactory) + end + + it "should not serve" do + expect { @server.serve()}.to raise_error(NotImplementedError) + end + + it "should provide a reasonable to_s" do + expect(@serverTrans).to receive(:to_s).once.and_return("serverTrans") + expect(@trans).to receive(:to_s).once.and_return("trans") + expect(@prot).to receive(:to_s).once.and_return("prot") + expect(@server.to_s).to eq("server(prot(trans(serverTrans)))") + end end describe Thrift::SimpleServer do before(:each) do - @processor = mock("Processor") - @serverTrans = mock("ServerTransport") - @trans = mock("BaseTransport") - @prot = mock("BaseProtocol") - @client = mock("Client") + @processor = double("Processor") + @serverTrans = double("ServerTransport") + @trans = double("BaseTransport") + @prot = double("BaseProtocol") + @client = double("Client") @server = described_class.new(@processor, @serverTrans, @trans, @prot) end + it "should provide a reasonable to_s" do + expect(@serverTrans).to receive(:to_s).once.and_return("serverTrans") + expect(@trans).to receive(:to_s).once.and_return("trans") + expect(@prot).to receive(:to_s).once.and_return("prot") + expect(@server.to_s).to eq("simple(server(prot(trans(serverTrans))))") + end + it "should serve in the main thread" do - @serverTrans.should_receive(:listen).ordered - @serverTrans.should_receive(:accept).exactly(3).times.and_return(@client) - @trans.should_receive(:get_transport).exactly(3).times.with(@client).and_return(@trans) - @prot.should_receive(:get_protocol).exactly(3).times.with(@trans).and_return(@prot) + expect(@serverTrans).to receive(:listen).ordered + expect(@serverTrans).to receive(:accept).exactly(3).times.and_return(@client) + expect(@trans).to receive(:get_transport).exactly(3).times.with(@client).and_return(@trans) + expect(@prot).to receive(:get_protocol).exactly(3).times.with(@trans).and_return(@prot) x = 0 - 
@processor.should_receive(:process).exactly(3).times.with(@prot, @prot).and_return do + expect(@processor).to receive(:process).exactly(3).times.with(@prot, @prot) do case (x += 1) when 1 then raise Thrift::TransportException when 2 then raise Thrift::ProtocolException when 3 then throw :stop end end - @trans.should_receive(:close).exactly(3).times - @serverTrans.should_receive(:close).ordered - lambda { @server.serve }.should throw_symbol(:stop) + expect(@trans).to receive(:close).exactly(3).times + expect(@serverTrans).to receive(:close).ordered + expect { @server.serve }.to throw_symbol(:stop) end end describe Thrift::ThreadedServer do before(:each) do - @processor = mock("Processor") - @serverTrans = mock("ServerTransport") - @trans = mock("BaseTransport") - @prot = mock("BaseProtocol") - @client = mock("Client") + @processor = double("Processor") + @serverTrans = double("ServerTransport") + @trans = double("BaseTransport") + @prot = double("BaseProtocol") + @client = double("Client") @server = described_class.new(@processor, @serverTrans, @trans, @prot) end + it "should provide a reasonable to_s" do + expect(@serverTrans).to receive(:to_s).once.and_return("serverTrans") + expect(@trans).to receive(:to_s).once.and_return("trans") + expect(@prot).to receive(:to_s).once.and_return("prot") + expect(@server.to_s).to eq("threaded(server(prot(trans(serverTrans))))") + end + it "should serve using threads" do - @serverTrans.should_receive(:listen).ordered - @serverTrans.should_receive(:accept).exactly(3).times.and_return(@client) - @trans.should_receive(:get_transport).exactly(3).times.with(@client).and_return(@trans) - @prot.should_receive(:get_protocol).exactly(3).times.with(@trans).and_return(@prot) - Thread.should_receive(:new).with(@prot, @trans).exactly(3).times.and_yield(@prot, @trans) + expect(@serverTrans).to receive(:listen).ordered + expect(@serverTrans).to receive(:accept).exactly(3).times.and_return(@client) + expect(@trans).to 
receive(:get_transport).exactly(3).times.with(@client).and_return(@trans) + expect(@prot).to receive(:get_protocol).exactly(3).times.with(@trans).and_return(@prot) + expect(Thread).to receive(:new).with(@prot, @trans).exactly(3).times.and_yield(@prot, @trans) x = 0 - @processor.should_receive(:process).exactly(3).times.with(@prot, @prot).and_return do + expect(@processor).to receive(:process).exactly(3).times.with(@prot, @prot) do case (x += 1) when 1 then raise Thrift::TransportException when 2 then raise Thrift::ProtocolException when 3 then throw :stop end end - @trans.should_receive(:close).exactly(3).times - @serverTrans.should_receive(:close).ordered - lambda { @server.serve }.should throw_symbol(:stop) + expect(@trans).to receive(:close).exactly(3).times + expect(@serverTrans).to receive(:close).ordered + expect { @server.serve }.to throw_symbol(:stop) end end describe Thrift::ThreadPoolServer do before(:each) do - @processor = mock("Processor") - @server_trans = mock("ServerTransport") - @trans = mock("BaseTransport") - @prot = mock("BaseProtocol") - @client = mock("Client") + @processor = double("Processor") + @server_trans = double("ServerTransport") + @trans = double("BaseTransport") + @prot = double("BaseProtocol") + @client = double("Client") @server = described_class.new(@processor, @server_trans, @trans, @prot) - sleep(0.1) + sleep(0.15) end + it "should provide a reasonable to_s" do + expect(@server_trans).to receive(:to_s).once.and_return("server_trans") + expect(@trans).to receive(:to_s).once.and_return("trans") + expect(@prot).to receive(:to_s).once.and_return("prot") + expect(@server.to_s).to eq("threadpool(server(prot(trans(server_trans))))") + end + it "should serve inside a thread" do exception_q = @server.instance_variable_get(:@exception_q) - described_class.any_instance.should_receive(:serve) do + expect_any_instance_of(described_class).to receive(:serve) do exception_q.push(StandardError.new('ERROR')) end expect { @server.rescuable_serve 
}.to(raise_error('ERROR')) @@ -110,7 +148,7 @@ describe 'Server' do it "should avoid running the server twice when retrying rescuable_serve" do exception_q = @server.instance_variable_get(:@exception_q) - described_class.any_instance.should_receive(:serve) do + expect_any_instance_of(described_class).to receive(:serve) do exception_q.push(StandardError.new('ERROR1')) exception_q.push(StandardError.new('ERROR2')) end @@ -119,29 +157,29 @@ describe 'Server' do end it "should serve using a thread pool" do - thread_q = mock("SizedQueue") - exception_q = mock("Queue") + thread_q = double("SizedQueue") + exception_q = double("Queue") @server.instance_variable_set(:@thread_q, thread_q) @server.instance_variable_set(:@exception_q, exception_q) - @server_trans.should_receive(:listen).ordered - thread_q.should_receive(:push).with(:token) - thread_q.should_receive(:pop) - Thread.should_receive(:new).and_yield - @server_trans.should_receive(:accept).exactly(3).times.and_return(@client) - @trans.should_receive(:get_transport).exactly(3).times.and_return(@trans) - @prot.should_receive(:get_protocol).exactly(3).times.and_return(@prot) + expect(@server_trans).to receive(:listen).ordered + expect(thread_q).to receive(:push).with(:token) + expect(thread_q).to receive(:pop) + expect(Thread).to receive(:new).and_yield + expect(@server_trans).to receive(:accept).exactly(3).times.and_return(@client) + expect(@trans).to receive(:get_transport).exactly(3).times.and_return(@trans) + expect(@prot).to receive(:get_protocol).exactly(3).times.and_return(@prot) x = 0 error = RuntimeError.new("Stopped") - @processor.should_receive(:process).exactly(3).times.with(@prot, @prot).and_return do + expect(@processor).to receive(:process).exactly(3).times.with(@prot, @prot) do case (x += 1) when 1 then raise Thrift::TransportException when 2 then raise Thrift::ProtocolException when 3 then raise error end end - @trans.should_receive(:close).exactly(3).times - 
exception_q.should_receive(:push).with(error).and_throw(:stop) - @server_trans.should_receive(:close) + expect(@trans).to receive(:close).exactly(3).times + expect(exception_q).to receive(:push).with(error).and_throw(:stop) + expect(@server_trans).to receive(:close) expect { @server.serve }.to(throw_symbol(:stop)) end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec.rb index 8e1ef50be..202c745ea 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec.rb @@ -25,37 +25,44 @@ describe 'Socket' do describe Thrift::Socket do before(:each) do @socket = Thrift::Socket.new - @handle = mock("Handle", :closed? => false) - @handle.stub!(:close) - @handle.stub!(:connect_nonblock) - @handle.stub!(:setsockopt) - ::Socket.stub!(:new).and_return(@handle) + @handle = double("Handle", :closed? => false) + allow(@handle).to receive(:close) + allow(@handle).to receive(:connect_nonblock) + allow(@handle).to receive(:setsockopt) + allow(::Socket).to receive(:new).and_return(@handle) end it_should_behave_like "a socket" it "should raise a TransportException when it cannot open a socket" do - ::Socket.should_receive(:getaddrinfo).with("localhost", 9090, nil, ::Socket::SOCK_STREAM).and_return([[]]) - lambda { @socket.open }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::NOT_OPEN } + expect(::Socket).to receive(:getaddrinfo).with("localhost", 9090, nil, ::Socket::SOCK_STREAM).and_return([[]]) + expect { @socket.open }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::NOT_OPEN) } end it "should open a ::Socket with default args" do - ::Socket.should_receive(:new).and_return(mock("Handle", :connect_nonblock => true, :setsockopt => nil)) - ::Socket.should_receive(:getaddrinfo).with("localhost", 9090, nil, 
::Socket::SOCK_STREAM).and_return([[]]) - ::Socket.should_receive(:sockaddr_in) + expect(::Socket).to receive(:new).and_return(double("Handle", :connect_nonblock => true, :setsockopt => nil)) + expect(::Socket).to receive(:getaddrinfo).with("localhost", 9090, nil, ::Socket::SOCK_STREAM).and_return([[]]) + expect(::Socket).to receive(:sockaddr_in) + @socket.to_s == "socket(localhost:9090)" @socket.open end it "should accept host/port options" do - ::Socket.should_receive(:new).and_return(mock("Handle", :connect_nonblock => true, :setsockopt => nil)) - ::Socket.should_receive(:getaddrinfo).with("my.domain", 1234, nil, ::Socket::SOCK_STREAM).and_return([[]]) - ::Socket.should_receive(:sockaddr_in) - Thrift::Socket.new('my.domain', 1234).open + expect(::Socket).to receive(:new).and_return(double("Handle", :connect_nonblock => true, :setsockopt => nil)) + expect(::Socket).to receive(:getaddrinfo).with("my.domain", 1234, nil, ::Socket::SOCK_STREAM).and_return([[]]) + expect(::Socket).to receive(:sockaddr_in) + @socket = Thrift::Socket.new('my.domain', 1234).open + @socket.to_s == "socket(my.domain:1234)" end it "should accept an optional timeout" do - ::Socket.stub!(:new) - Thrift::Socket.new('localhost', 8080, 5).timeout.should == 5 + allow(::Socket).to receive(:new) + expect(Thrift::Socket.new('localhost', 8080, 5).timeout).to eq(5) + end + + it "should provide a reasonable to_s" do + allow(::Socket).to receive(:new) + expect(Thrift::Socket.new('myhost', 8090).to_s).to eq("socket(myhost:8090)") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec_shared.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec_shared.rb index 5fddc16a7..32bdb71f0 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec_shared.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/socket_spec_shared.rb @@ -21,84 +21,84 @@ require 'spec_helper' shared_examples_for "a socket" do it "should open a socket" do - @socket.open.should == @handle + 
expect(@socket.open).to eq(@handle) end it "should be open whenever it has a handle" do - @socket.should_not be_open + expect(@socket).not_to be_open @socket.open - @socket.should be_open + expect(@socket).to be_open @socket.handle = nil - @socket.should_not be_open + expect(@socket).not_to be_open @socket.handle = @handle @socket.close - @socket.should_not be_open + expect(@socket).not_to be_open end it "should write data to the handle" do @socket.open - @handle.should_receive(:write).with("foobar") + expect(@handle).to receive(:write).with("foobar") @socket.write("foobar") - @handle.should_receive(:write).with("fail").and_raise(StandardError) - lambda { @socket.write("fail") }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::NOT_OPEN } + expect(@handle).to receive(:write).with("fail").and_raise(StandardError) + expect { @socket.write("fail") }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::NOT_OPEN) } end it "should raise an error when it cannot read from the handle" do @socket.open - @handle.should_receive(:readpartial).with(17).and_raise(StandardError) - lambda { @socket.read(17) }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::NOT_OPEN } + expect(@handle).to receive(:readpartial).with(17).and_raise(StandardError) + expect { @socket.read(17) }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::NOT_OPEN) } end it "should return the data read when reading from the handle works" do @socket.open - @handle.should_receive(:readpartial).with(17).and_return("test data") - @socket.read(17).should == "test data" + expect(@handle).to receive(:readpartial).with(17).and_return("test data") + expect(@socket.read(17)).to eq("test data") end it "should declare itself as closed when it has an error" do @socket.open - 
@handle.should_receive(:write).with("fail").and_raise(StandardError) - @socket.should be_open - lambda { @socket.write("fail") }.should raise_error - @socket.should_not be_open + expect(@handle).to receive(:write).with("fail").and_raise(StandardError) + expect(@socket).to be_open + expect { @socket.write("fail") }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::NOT_OPEN) } + expect(@socket).not_to be_open end it "should raise an error when the stream is closed" do @socket.open - @handle.stub!(:closed?).and_return(true) - @socket.should_not be_open - lambda { @socket.write("fail") }.should raise_error(IOError, "closed stream") - lambda { @socket.read(10) }.should raise_error(IOError, "closed stream") + allow(@handle).to receive(:closed?).and_return(true) + expect(@socket).not_to be_open + expect { @socket.write("fail") }.to raise_error(IOError, "closed stream") + expect { @socket.read(10) }.to raise_error(IOError, "closed stream") end it "should support the timeout accessor for read" do @socket.timeout = 3 @socket.open - IO.should_receive(:select).with([@handle], nil, nil, 3).and_return([[@handle], [], []]) - @handle.should_receive(:readpartial).with(17).and_return("test data") - @socket.read(17).should == "test data" + expect(IO).to receive(:select).with([@handle], nil, nil, 3).and_return([[@handle], [], []]) + expect(@handle).to receive(:readpartial).with(17).and_return("test data") + expect(@socket.read(17)).to eq("test data") end it "should support the timeout accessor for write" do @socket.timeout = 3 @socket.open - IO.should_receive(:select).with(nil, [@handle], nil, 3).twice.and_return([[], [@handle], []]) - @handle.should_receive(:write_nonblock).with("test data").and_return(4) - @handle.should_receive(:write_nonblock).with(" data").and_return(5) - @socket.write("test data").should == 9 + expect(IO).to receive(:select).with(nil, [@handle], nil, 3).twice.and_return([[], [@handle], []]) + expect(@handle).to 
receive(:write_nonblock).with("test data").and_return(4) + expect(@handle).to receive(:write_nonblock).with(" data").and_return(5) + expect(@socket.write("test data")).to eq(9) end it "should raise an error when read times out" do @socket.timeout = 0.5 @socket.open - IO.should_receive(:select).once {sleep(0.5); nil} - lambda { @socket.read(17) }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::TIMED_OUT } + expect(IO).to receive(:select).once {sleep(0.5); nil} + expect { @socket.read(17) }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::TIMED_OUT) } end it "should raise an error when write times out" do @socket.timeout = 0.5 @socket.open - IO.should_receive(:select).with(nil, [@handle], nil, 0.5).any_number_of_times.and_return(nil) - lambda { @socket.write("test data") }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::TIMED_OUT } + allow(IO).to receive(:select).with(nil, [@handle], nil, 0.5).and_return(nil) + expect { @socket.write("test data") }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::TIMED_OUT) } end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_server_socket_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_server_socket_spec.rb new file mode 100644 index 000000000..82e651843 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_server_socket_spec.rb @@ -0,0 +1,34 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +require 'spec_helper' +require File.expand_path("#{File.dirname(__FILE__)}/socket_spec_shared") + +describe 'SSLServerSocket' do + + describe Thrift::SSLServerSocket do + before(:each) do + @socket = Thrift::SSLServerSocket.new(1234) + end + + it "should provide a reasonable to_s" do + expect(@socket.to_s).to eq("ssl(socket(:1234))") + end + end +end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_socket_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_socket_spec.rb index a8bc78540..808d8d512 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_socket_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/ssl_socket_spec.rb @@ -26,49 +26,53 @@ describe 'SSLSocket' do before(:each) do @context = OpenSSL::SSL::SSLContext.new @socket = Thrift::SSLSocket.new - @simple_socket_handle = mock("Handle", :closed? => false) - @simple_socket_handle.stub!(:close) - @simple_socket_handle.stub!(:connect_nonblock) - @simple_socket_handle.stub!(:setsockopt) + @simple_socket_handle = double("Handle", :closed? => false) + allow(@simple_socket_handle).to receive(:close) + allow(@simple_socket_handle).to receive(:connect_nonblock) + allow(@simple_socket_handle).to receive(:setsockopt) - @handle = mock(mock("SSLHandle", :connect_nonblock => true, :post_connection_check => true), :closed? => false) - @handle.stub!(:connect_nonblock) - @handle.stub!(:close) - @handle.stub!(:post_connection_check) + @handle = double(double("SSLHandle", :connect_nonblock => true, :post_connection_check => true), :closed? 
=> false) + allow(@handle).to receive(:connect_nonblock) + allow(@handle).to receive(:close) + allow(@handle).to receive(:post_connection_check) - ::Socket.stub!(:new).and_return(@simple_socket_handle) - OpenSSL::SSL::SSLSocket.stub!(:new).and_return(@handle) + allow(::Socket).to receive(:new).and_return(@simple_socket_handle) + allow(OpenSSL::SSL::SSLSocket).to receive(:new).and_return(@handle) end it_should_behave_like "a socket" it "should raise a TransportException when it cannot open a ssl socket" do - ::Socket.should_receive(:getaddrinfo).with("localhost", 9090, nil, ::Socket::SOCK_STREAM).and_return([[]]) - lambda { @socket.open }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::NOT_OPEN } + expect(::Socket).to receive(:getaddrinfo).with("localhost", 9090, nil, ::Socket::SOCK_STREAM).and_return([[]]) + expect { @socket.open }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::NOT_OPEN) } end it "should open a ::Socket with default args" do - OpenSSL::SSL::SSLSocket.should_receive(:new).with(@simple_socket_handle, nil).and_return(@handle) - @handle.should_receive(:post_connection_check).with('localhost') + expect(OpenSSL::SSL::SSLSocket).to receive(:new).with(@simple_socket_handle, nil).and_return(@handle) + expect(@handle).to receive(:post_connection_check).with('localhost') @socket.open end it "should accept host/port options" do - handle = mock("Handle", :connect_nonblock => true, :setsockopt => nil) - ::Socket.stub!(:new).and_return(handle) - ::Socket.should_receive(:getaddrinfo).with("my.domain", 1234, nil, ::Socket::SOCK_STREAM).and_return([[]]) - ::Socket.should_receive(:sockaddr_in) - OpenSSL::SSL::SSLSocket.should_receive(:new).with(handle, nil).and_return(@handle) - @handle.should_receive(:post_connection_check).with('my.domain') + handle = double("Handle", :connect_nonblock => true, :setsockopt => nil) + allow(::Socket).to 
receive(:new).and_return(handle) + expect(::Socket).to receive(:getaddrinfo).with("my.domain", 1234, nil, ::Socket::SOCK_STREAM).and_return([[]]) + expect(::Socket).to receive(:sockaddr_in) + expect(OpenSSL::SSL::SSLSocket).to receive(:new).with(handle, nil).and_return(@handle) + expect(@handle).to receive(:post_connection_check).with('my.domain') Thrift::SSLSocket.new('my.domain', 1234, 6000, nil).open end it "should accept an optional timeout" do - Thrift::SSLSocket.new('localhost', 8080, 5).timeout.should == 5 + expect(Thrift::SSLSocket.new('localhost', 8080, 5).timeout).to eq(5) end it "should accept an optional context" do - Thrift::SSLSocket.new('localhost', 8080, 5, @context).ssl_context.should == @context + expect(Thrift::SSLSocket.new('localhost', 8080, 5, @context).ssl_context).to eq(@context) + end + + it "should provide a reasonable to_s" do + expect(Thrift::SSLSocket.new('myhost', 8090).to_s).to eq("ssl(socket(myhost:8090))") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_nested_containers_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_nested_containers_spec.rb index dc8ce5f58..d063569b5 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_nested_containers_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_nested_containers_spec.rb @@ -39,9 +39,9 @@ describe 'StructNestedContainers' do thrift_struct.value = [ [1, 2, 3], [2, 3, 4] ] thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value.push [3, 4, 5] - a.should_not == b + expect(a).not_to eq(b) end end @@ -52,9 +52,9 @@ describe 'StructNestedContainers' do thrift_struct.value = [ [1, 2, 3], [2, 3, 4] ].to_set thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value.add [3, 4, 5] - a.should_not == b + expect(a).not_to eq(b) end end @@ -65,9 +65,9 @@ describe 'StructNestedContainers' do thrift_struct.value = { [1, 2, 3] => 1, [2, 3, 4] => 2 } thrift_struct.validate end - a.should == b + expect(a).to 
eq(b) b.value[[3, 4, 5]] = 3 - a.should_not == b + expect(a).not_to eq(b) end end @@ -78,9 +78,9 @@ describe 'StructNestedContainers' do thrift_struct.value = { 1 => [1, 2, 3], 2 => [2, 3, 4] } thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value[3] = [3, 4, 5] - a.should_not == b + expect(a).not_to eq(b) end end @@ -91,9 +91,9 @@ describe 'StructNestedContainers' do thrift_struct.value = [ [1, 2, 3].to_set, [2, 3, 4].to_set ] thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value.push([3, 4, 5].to_set) - a.should_not == b + expect(a).not_to eq(b) end end @@ -104,9 +104,9 @@ describe 'StructNestedContainers' do thrift_struct.value = [ [1, 2, 3].to_set, [2, 3, 4].to_set ].to_set thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value.add([3, 4, 5].to_set) - a.should_not == b + expect(a).not_to eq(b) end end @@ -117,9 +117,9 @@ describe 'StructNestedContainers' do thrift_struct.value = { [1, 2, 3].to_set => 1, [2, 3, 4].to_set => 2 } thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value[[3, 4, 5].to_set] = 3 - a.should_not == b + expect(a).not_to eq(b) end end @@ -130,9 +130,9 @@ describe 'StructNestedContainers' do thrift_struct.value = { 1 => [1, 2, 3].to_set, 2 => [2, 3, 4].to_set } thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value[3] = [3, 4, 5].to_set - a.should_not == b + expect(a).not_to eq(b) end end @@ -143,9 +143,9 @@ describe 'StructNestedContainers' do thrift_struct.value = [ {1 => 2, 3 => 4}, {2 => 3, 4 => 5} ] thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value.push({ 3 => 4, 5 => 6 }) - a.should_not == b + expect(a).not_to eq(b) end end @@ -156,9 +156,9 @@ describe 'StructNestedContainers' do thrift_struct.value = [ {1 => 2, 3 => 4}, {2 => 3, 4 => 5} ].to_set thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value.add({ 3 => 4, 5 => 6 }) - a.should_not == b + expect(a).not_to eq(b) end end @@ -169,9 +169,9 @@ describe 
'StructNestedContainers' do thrift_struct.value = { { 1 => 2, 3 => 4} => 1, {2 => 3, 4 => 5} => 2 } thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value[{3 => 4, 5 => 6}] = 3 - a.should_not == b + expect(a).not_to eq(b) end end @@ -182,9 +182,9 @@ describe 'StructNestedContainers' do thrift_struct.value = { 1 => { 1 => 2, 3 => 4}, 2 => {2 => 3, 4 => 5} } thrift_struct.validate end - a.should == b + expect(a).to eq(b) b.value[3] = { 3 => 4, 5 => 6 } - a.should_not == b + expect(a).not_to eq(b) end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_spec.rb index 6534d616a..b09c7f626 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/struct_spec.rb @@ -25,7 +25,7 @@ describe 'Struct' do it "should iterate over all fields properly" do fields = {} SpecNamespace::Foo.new.each_field { |fid,field_info| fields[fid] = field_info } - fields.should == SpecNamespace::Foo::FIELDS + expect(fields).to eq(SpecNamespace::Foo::FIELDS) end it "should initialize all fields to defaults" do @@ -39,19 +39,19 @@ describe 'Struct' do end def validate_default_arguments(object) - object.simple.should == 53 - object.words.should == "words" - object.hello.should == SpecNamespace::Hello.new(:greeting => 'hello, world!') - object.ints.should == [1, 2, 2, 3] - object.complex.should be_nil - object.shorts.should == Set.new([5, 17, 239]) + expect(object.simple).to eq(53) + expect(object.words).to eq("words") + expect(object.hello).to eq(SpecNamespace::Hello.new(:greeting => 'hello, world!')) + expect(object.ints).to eq([1, 2, 2, 3]) + expect(object.complex).to be_nil + expect(object.shorts).to eq(Set.new([5, 17, 239])) end it "should not share default values between instances" do begin struct = SpecNamespace::Foo.new struct.ints << 17 - SpecNamespace::Foo.new.ints.should == [1,2,2,3] + expect(SpecNamespace::Foo.new.ints).to 
eq([1,2,2,3]) ensure # ensure no leakage to other tests SpecNamespace::Foo::FIELDS[4][:default] = [1,2,2,3] @@ -60,48 +60,48 @@ describe 'Struct' do it "should properly initialize boolean values" do struct = SpecNamespace::BoolStruct.new(:yesno => false) - struct.yesno.should be_false + expect(struct.yesno).to be_falsey end it "should have proper == semantics" do - SpecNamespace::Foo.new.should_not == SpecNamespace::Hello.new - SpecNamespace::Foo.new.should == SpecNamespace::Foo.new - SpecNamespace::Foo.new(:simple => 52).should_not == SpecNamespace::Foo.new + expect(SpecNamespace::Foo.new).not_to eq(SpecNamespace::Hello.new) + expect(SpecNamespace::Foo.new).to eq(SpecNamespace::Foo.new) + expect(SpecNamespace::Foo.new(:simple => 52)).not_to eq(SpecNamespace::Foo.new) end it "should print enum value names in inspect" do - SpecNamespace::StructWithSomeEnum.new(:some_enum => SpecNamespace::SomeEnum::ONE).inspect.should == "" + expect(SpecNamespace::StructWithSomeEnum.new(:some_enum => SpecNamespace::SomeEnum::ONE).inspect).to eq("") - SpecNamespace::StructWithEnumMap.new(:my_map => {SpecNamespace::SomeEnum::ONE => [SpecNamespace::SomeEnum::TWO]}).inspect.should == "" + expect(SpecNamespace::StructWithEnumMap.new(:my_map => {SpecNamespace::SomeEnum::ONE => [SpecNamespace::SomeEnum::TWO]}).inspect).to eq("") end it "should pretty print binary fields" do - SpecNamespace::Foo2.new(:my_binary => "\001\002\003").inspect.should == "" + expect(SpecNamespace::Foo2.new(:my_binary => "\001\002\003").inspect).to eq("") end it "should offer field? 
methods" do - SpecNamespace::Foo.new.opt_string?.should be_false - SpecNamespace::Foo.new(:simple => 52).simple?.should be_true - SpecNamespace::Foo.new(:my_bool => false).my_bool?.should be_true - SpecNamespace::Foo.new(:my_bool => true).my_bool?.should be_true + expect(SpecNamespace::Foo.new.opt_string?).to be_falsey + expect(SpecNamespace::Foo.new(:simple => 52).simple?).to be_truthy + expect(SpecNamespace::Foo.new(:my_bool => false).my_bool?).to be_truthy + expect(SpecNamespace::Foo.new(:my_bool => true).my_bool?).to be_truthy end it "should be comparable" do s1 = SpecNamespace::StructWithSomeEnum.new(:some_enum => SpecNamespace::SomeEnum::ONE) s2 = SpecNamespace::StructWithSomeEnum.new(:some_enum => SpecNamespace::SomeEnum::TWO) - (s1 <=> s2).should == -1 - (s2 <=> s1).should == 1 - (s1 <=> s1).should == 0 - (s1 <=> SpecNamespace::StructWithSomeEnum.new()).should == -1 + expect(s1 <=> s2).to eq(-1) + expect(s2 <=> s1).to eq(1) + expect(s1 <=> s1).to eq(0) + expect(s1 <=> SpecNamespace::StructWithSomeEnum.new()).to eq(-1) end it "should read itself off the wire" do struct = SpecNamespace::Foo.new - prot = Thrift::BaseProtocol.new(mock("transport")) - prot.should_receive(:read_struct_begin).twice - prot.should_receive(:read_struct_end).twice - prot.should_receive(:read_field_begin).and_return( + prot = Thrift::BaseProtocol.new(double("transport")) + expect(prot).to receive(:read_struct_begin).twice + expect(prot).to receive(:read_struct_end).twice + expect(prot).to receive(:read_field_begin).and_return( ['complex', Thrift::Types::MAP, 5], # Foo ['words', Thrift::Types::STRING, 2], # Foo ['hello', Thrift::Types::STRUCT, 3], # Foo @@ -112,49 +112,49 @@ describe 'Struct' do ['shorts', Thrift::Types::SET, 6], # Foo [nil, Thrift::Types::STOP, 0] # Hello ) - prot.should_receive(:read_field_end).exactly(7).times - prot.should_receive(:read_map_begin).and_return( + expect(prot).to receive(:read_field_end).exactly(7).times + expect(prot).to 
receive(:read_map_begin).and_return( [Thrift::Types::I32, Thrift::Types::MAP, 2], # complex [Thrift::Types::STRING, Thrift::Types::DOUBLE, 2], # complex/1/value [Thrift::Types::STRING, Thrift::Types::DOUBLE, 1] # complex/2/value ) - prot.should_receive(:read_map_end).exactly(3).times - prot.should_receive(:read_list_begin).and_return([Thrift::Types::I32, 4]) - prot.should_receive(:read_list_end) - prot.should_receive(:read_set_begin).and_return([Thrift::Types::I16, 2]) - prot.should_receive(:read_set_end) - prot.should_receive(:read_i32).and_return( + expect(prot).to receive(:read_map_end).exactly(3).times + expect(prot).to receive(:read_list_begin).and_return([Thrift::Types::I32, 4]) + expect(prot).to receive(:read_list_end) + expect(prot).to receive(:read_set_begin).and_return([Thrift::Types::I16, 2]) + expect(prot).to receive(:read_set_end) + expect(prot).to receive(:read_i32).and_return( 1, 14, # complex keys 42, # simple 4, 23, 4, 29 # ints ) - prot.should_receive(:read_string).and_return("pi", "e", "feigenbaum", "apple banana", "what's up?") - prot.should_receive(:read_double).and_return(Math::PI, Math::E, 4.669201609) - prot.should_receive(:read_i16).and_return(2, 3) - prot.should_not_receive(:skip) + expect(prot).to receive(:read_string).and_return("pi", "e", "feigenbaum", "apple banana", "what's up?") + expect(prot).to receive(:read_double).and_return(Math::PI, Math::E, 4.669201609) + expect(prot).to receive(:read_i16).and_return(2, 3) + expect(prot).not_to receive(:skip) struct.read(prot) - struct.simple.should == 42 - struct.complex.should == {1 => {"pi" => Math::PI, "e" => Math::E}, 14 => {"feigenbaum" => 4.669201609}} - struct.hello.should == SpecNamespace::Hello.new(:greeting => "what's up?") - struct.words.should == "apple banana" - struct.ints.should == [4, 23, 4, 29] - struct.shorts.should == Set.new([3, 2]) + expect(struct.simple).to eq(42) + expect(struct.complex).to eq({1 => {"pi" => Math::PI, "e" => Math::E}, 14 => {"feigenbaum" => 
4.669201609}}) + expect(struct.hello).to eq(SpecNamespace::Hello.new(:greeting => "what's up?")) + expect(struct.words).to eq("apple banana") + expect(struct.ints).to eq([4, 23, 4, 29]) + expect(struct.shorts).to eq(Set.new([3, 2])) end it "should serialize false boolean fields correctly" do b = SpecNamespace::BoolStruct.new(:yesno => false) prot = Thrift::BinaryProtocol.new(Thrift::MemoryBufferTransport.new) - prot.should_receive(:write_bool).with(false) + expect(prot).to receive(:write_bool).with(false) b.write(prot) end it "should skip unexpected fields in structs and use default values" do struct = SpecNamespace::Foo.new - prot = Thrift::BaseProtocol.new(mock("transport")) - prot.should_receive(:read_struct_begin) - prot.should_receive(:read_struct_end) - prot.should_receive(:read_field_begin).and_return( + prot = Thrift::BaseProtocol.new(double("transport")) + expect(prot).to receive(:read_struct_begin) + expect(prot).to receive(:read_struct_end) + expect(prot).to receive(:read_field_begin).and_return( ['simple', Thrift::Types::I32, 1], ['complex', Thrift::Types::STRUCT, 5], ['thinz', Thrift::Types::MAP, 7], @@ -162,55 +162,55 @@ describe 'Struct' do ['words', Thrift::Types::STRING, 2], [nil, Thrift::Types::STOP, 0] ) - prot.should_receive(:read_field_end).exactly(5).times - prot.should_receive(:read_i32).and_return(42) - prot.should_receive(:read_string).and_return("foobar") - prot.should_receive(:skip).with(Thrift::Types::STRUCT) - prot.should_receive(:skip).with(Thrift::Types::MAP) + expect(prot).to receive(:read_field_end).exactly(5).times + expect(prot).to receive(:read_i32).and_return(42) + expect(prot).to receive(:read_string).and_return("foobar") + expect(prot).to receive(:skip).with(Thrift::Types::STRUCT) + expect(prot).to receive(:skip).with(Thrift::Types::MAP) # prot.should_receive(:read_map_begin).and_return([Thrift::Types::I32, Thrift::Types::I32, 0]) # prot.should_receive(:read_map_end) - prot.should_receive(:skip).with(Thrift::Types::I32) + 
expect(prot).to receive(:skip).with(Thrift::Types::I32) struct.read(prot) - struct.simple.should == 42 - struct.complex.should be_nil - struct.words.should == "foobar" - struct.hello.should == SpecNamespace::Hello.new(:greeting => 'hello, world!') - struct.ints.should == [1, 2, 2, 3] - struct.shorts.should == Set.new([5, 17, 239]) + expect(struct.simple).to eq(42) + expect(struct.complex).to be_nil + expect(struct.words).to eq("foobar") + expect(struct.hello).to eq(SpecNamespace::Hello.new(:greeting => 'hello, world!')) + expect(struct.ints).to eq([1, 2, 2, 3]) + expect(struct.shorts).to eq(Set.new([5, 17, 239])) end it "should write itself to the wire" do - prot = Thrift::BaseProtocol.new(mock("transport")) #mock("Protocol") - prot.should_receive(:write_struct_begin).with("SpecNamespace::Foo") - prot.should_receive(:write_struct_begin).with("SpecNamespace::Hello") - prot.should_receive(:write_struct_end).twice - prot.should_receive(:write_field_begin).with('ints', Thrift::Types::LIST, 4) - prot.should_receive(:write_i32).with(1) - prot.should_receive(:write_i32).with(2).twice - prot.should_receive(:write_i32).with(3) - prot.should_receive(:write_field_begin).with('complex', Thrift::Types::MAP, 5) - prot.should_receive(:write_i32).with(5) - prot.should_receive(:write_string).with('foo') - prot.should_receive(:write_double).with(1.23) - prot.should_receive(:write_field_begin).with('shorts', Thrift::Types::SET, 6) - prot.should_receive(:write_i16).with(5) - prot.should_receive(:write_i16).with(17) - prot.should_receive(:write_i16).with(239) - prot.should_receive(:write_field_stop).twice - prot.should_receive(:write_field_end).exactly(6).times - prot.should_receive(:write_field_begin).with('simple', Thrift::Types::I32, 1) - prot.should_receive(:write_i32).with(53) - prot.should_receive(:write_field_begin).with('hello', Thrift::Types::STRUCT, 3) - prot.should_receive(:write_field_begin).with('greeting', Thrift::Types::STRING, 1) - 
prot.should_receive(:write_string).with('hello, world!') - prot.should_receive(:write_map_begin).with(Thrift::Types::I32, Thrift::Types::MAP, 1) - prot.should_receive(:write_map_begin).with(Thrift::Types::STRING, Thrift::Types::DOUBLE, 1) - prot.should_receive(:write_map_end).twice - prot.should_receive(:write_list_begin).with(Thrift::Types::I32, 4) - prot.should_receive(:write_list_end) - prot.should_receive(:write_set_begin).with(Thrift::Types::I16, 3) - prot.should_receive(:write_set_end) + prot = Thrift::BaseProtocol.new(double("transport")) #mock("Protocol") + expect(prot).to receive(:write_struct_begin).with("SpecNamespace::Foo") + expect(prot).to receive(:write_struct_begin).with("SpecNamespace::Hello") + expect(prot).to receive(:write_struct_end).twice + expect(prot).to receive(:write_field_begin).with('ints', Thrift::Types::LIST, 4) + expect(prot).to receive(:write_i32).with(1) + expect(prot).to receive(:write_i32).with(2).twice + expect(prot).to receive(:write_i32).with(3) + expect(prot).to receive(:write_field_begin).with('complex', Thrift::Types::MAP, 5) + expect(prot).to receive(:write_i32).with(5) + expect(prot).to receive(:write_string).with('foo') + expect(prot).to receive(:write_double).with(1.23) + expect(prot).to receive(:write_field_begin).with('shorts', Thrift::Types::SET, 6) + expect(prot).to receive(:write_i16).with(5) + expect(prot).to receive(:write_i16).with(17) + expect(prot).to receive(:write_i16).with(239) + expect(prot).to receive(:write_field_stop).twice + expect(prot).to receive(:write_field_end).exactly(6).times + expect(prot).to receive(:write_field_begin).with('simple', Thrift::Types::I32, 1) + expect(prot).to receive(:write_i32).with(53) + expect(prot).to receive(:write_field_begin).with('hello', Thrift::Types::STRUCT, 3) + expect(prot).to receive(:write_field_begin).with('greeting', Thrift::Types::STRING, 1) + expect(prot).to receive(:write_string).with('hello, world!') + expect(prot).to 
receive(:write_map_begin).with(Thrift::Types::I32, Thrift::Types::MAP, 1) + expect(prot).to receive(:write_map_begin).with(Thrift::Types::STRING, Thrift::Types::DOUBLE, 1) + expect(prot).to receive(:write_map_end).twice + expect(prot).to receive(:write_list_begin).with(Thrift::Types::I32, 4) + expect(prot).to receive(:write_list_end) + expect(prot).to receive(:write_set_begin).with(Thrift::Types::I16, 3) + expect(prot).to receive(:write_set_end) struct = SpecNamespace::Foo.new struct.words = nil @@ -221,50 +221,50 @@ describe 'Struct' do it "should raise an exception if presented with an unknown container" do # yeah this is silly, but I'm going for code coverage here struct = SpecNamespace::Foo.new - lambda { struct.send :write_container, nil, nil, {:type => "foo"} }.should raise_error(StandardError, "Not a container type: foo") + expect { struct.send :write_container, nil, nil, {:type => "foo"} }.to raise_error(StandardError, "Not a container type: foo") end it "should support optional type-checking in Thrift::Struct.new" do Thrift.type_checking = true begin - lambda { SpecNamespace::Hello.new(:greeting => 3) }.should raise_error(Thrift::TypeError, "Expected Types::STRING, received Fixnum for field greeting") + expect { SpecNamespace::Hello.new(:greeting => 3) }.to raise_error(Thrift::TypeError, "Expected Types::STRING, received Fixnum for field greeting") ensure Thrift.type_checking = false end - lambda { SpecNamespace::Hello.new(:greeting => 3) }.should_not raise_error(Thrift::TypeError) + expect { SpecNamespace::Hello.new(:greeting => 3) }.not_to raise_error end it "should support optional type-checking in field accessors" do Thrift.type_checking = true begin hello = SpecNamespace::Hello.new - lambda { hello.greeting = 3 }.should raise_error(Thrift::TypeError, "Expected Types::STRING, received Fixnum for field greeting") + expect { hello.greeting = 3 }.to raise_error(Thrift::TypeError, "Expected Types::STRING, received Fixnum for field greeting") ensure 
Thrift.type_checking = false end - lambda { hello.greeting = 3 }.should_not raise_error(Thrift::TypeError) + expect { hello.greeting = 3 }.not_to raise_error end it "should raise an exception when unknown types are given to Thrift::Struct.new" do - lambda { SpecNamespace::Hello.new(:fish => 'salmon') }.should raise_error(Exception, "Unknown key given to SpecNamespace::Hello.new: fish") + expect { SpecNamespace::Hello.new(:fish => 'salmon') }.to raise_error(Exception, "Unknown key given to SpecNamespace::Hello.new: fish") end it "should support `raise Xception, 'message'` for Exception structs" do begin raise SpecNamespace::Xception, "something happened" rescue Thrift::Exception => e - e.message.should == "something happened" - e.code.should == 1 + expect(e.message).to eq("something happened") + expect(e.code).to eq(1) # ensure it gets serialized properly, this is the really important part - prot = Thrift::BaseProtocol.new(mock("trans")) - prot.should_receive(:write_struct_begin).with("SpecNamespace::Xception") - prot.should_receive(:write_struct_end) - prot.should_receive(:write_field_begin).with('message', Thrift::Types::STRING, 1)#, "something happened") - prot.should_receive(:write_string).with("something happened") - prot.should_receive(:write_field_begin).with('code', Thrift::Types::I32, 2)#, 1) - prot.should_receive(:write_i32).with(1) - prot.should_receive(:write_field_stop) - prot.should_receive(:write_field_end).twice + prot = Thrift::BaseProtocol.new(double("trans")) + expect(prot).to receive(:write_struct_begin).with("SpecNamespace::Xception") + expect(prot).to receive(:write_struct_end) + expect(prot).to receive(:write_field_begin).with('message', Thrift::Types::STRING, 1)#, "something happened") + expect(prot).to receive(:write_string).with("something happened") + expect(prot).to receive(:write_field_begin).with('code', Thrift::Types::I32, 2)#, 1) + expect(prot).to receive(:write_i32).with(1) + expect(prot).to receive(:write_field_stop) + 
expect(prot).to receive(:write_field_end).twice e.write(prot) end @@ -274,17 +274,17 @@ describe 'Struct' do begin raise SpecNamespace::Xception, :message => "something happened", :code => 5 rescue Thrift::Exception => e - e.message.should == "something happened" - e.code.should == 5 - prot = Thrift::BaseProtocol.new(mock("trans")) - prot.should_receive(:write_struct_begin).with("SpecNamespace::Xception") - prot.should_receive(:write_struct_end) - prot.should_receive(:write_field_begin).with('message', Thrift::Types::STRING, 1) - prot.should_receive(:write_string).with("something happened") - prot.should_receive(:write_field_begin).with('code', Thrift::Types::I32, 2) - prot.should_receive(:write_i32).with(5) - prot.should_receive(:write_field_stop) - prot.should_receive(:write_field_end).twice + expect(e.message).to eq("something happened") + expect(e.code).to eq(5) + prot = Thrift::BaseProtocol.new(double("trans")) + expect(prot).to receive(:write_struct_begin).with("SpecNamespace::Xception") + expect(prot).to receive(:write_struct_end) + expect(prot).to receive(:write_field_begin).with('message', Thrift::Types::STRING, 1) + expect(prot).to receive(:write_string).with("something happened") + expect(prot).to receive(:write_field_begin).with('code', Thrift::Types::I32, 2) + expect(prot).to receive(:write_i32).with(5) + expect(prot).to receive(:write_field_stop) + expect(prot).to receive(:write_field_end).twice e.write(prot) end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/thin_http_server_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/thin_http_server_spec.rb index 552083921..665391b7d 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/thin_http_server_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/thin_http_server_spec.rb @@ -23,24 +23,24 @@ require 'thrift/server/thin_http_server' describe Thrift::ThinHTTPServer do - let(:processor) { mock('processor') } + let(:processor) { double('processor') } describe "#initialize" do 
context "when using the defaults" do it "binds to port 80, with host 0.0.0.0, a path of '/'" do - Thin::Server.should_receive(:new).with('0.0.0.0', 80, an_instance_of(Rack::Builder)) + expect(Thin::Server).to receive(:new).with('0.0.0.0', 80, an_instance_of(Rack::Builder)) Thrift::ThinHTTPServer.new(processor) end it 'creates a ThinHTTPServer::RackApplicationContext' do - Thrift::ThinHTTPServer::RackApplication.should_receive(:for).with("/", processor, an_instance_of(Thrift::BinaryProtocolFactory)).and_return(anything) + expect(Thrift::ThinHTTPServer::RackApplication).to receive(:for).with("/", processor, an_instance_of(Thrift::BinaryProtocolFactory)).and_return(anything) Thrift::ThinHTTPServer.new(processor) end it "uses the BinaryProtocolFactory" do - Thrift::BinaryProtocolFactory.should_receive(:new) + expect(Thrift::BinaryProtocolFactory).to receive(:new) Thrift::ThinHTTPServer.new(processor) end @@ -52,7 +52,7 @@ describe Thrift::ThinHTTPServer do ip = "192.168.0.1" port = 3000 path = "/thin" - Thin::Server.should_receive(:new).with(ip, port, an_instance_of(Rack::Builder)) + expect(Thin::Server).to receive(:new).with(ip, port, an_instance_of(Rack::Builder)) Thrift::ThinHTTPServer.new(processor, :ip => ip, :port => port, @@ -60,7 +60,7 @@ describe Thrift::ThinHTTPServer do end it 'creates a ThinHTTPServer::RackApplicationContext with a different protocol factory' do - Thrift::ThinHTTPServer::RackApplication.should_receive(:for).with("/", processor, an_instance_of(Thrift::JsonProtocolFactory)).and_return(anything) + expect(Thrift::ThinHTTPServer::RackApplication).to receive(:for).with("/", processor, an_instance_of(Thrift::JsonProtocolFactory)).and_return(anything) Thrift::ThinHTTPServer.new(processor, :protocol_factory => Thrift::JsonProtocolFactory.new) end @@ -72,12 +72,12 @@ describe Thrift::ThinHTTPServer do describe "#serve" do it 'starts the Thin server' do - underlying_thin_server = mock('thin server', :start => true) - 
Thin::Server.stub(:new).and_return(underlying_thin_server) + underlying_thin_server = double('thin server', :start => true) + allow(Thin::Server).to receive(:new).and_return(underlying_thin_server) thin_thrift_server = Thrift::ThinHTTPServer.new(processor) - underlying_thin_server.should_receive(:start) + expect(underlying_thin_server).to receive(:start) thin_thrift_server.serve end end @@ -87,8 +87,8 @@ end describe Thrift::ThinHTTPServer::RackApplication do include Rack::Test::Methods - let(:processor) { mock('processor') } - let(:protocol_factory) { mock('protocol factory') } + let(:processor) { double('processor') } + let(:protocol_factory) { double('protocol factory') } def app Thrift::ThinHTTPServer::RackApplication.for("/", processor, protocol_factory) @@ -99,13 +99,13 @@ describe Thrift::ThinHTTPServer::RackApplication do it 'receives a non-POST' do header('Content-Type', "application/x-thrift") get "/" - last_response.status.should be 404 + expect(last_response.status).to be 404 end it 'receives a header other than application/x-thrift' do header('Content-Type', "application/json") post "/" - last_response.status.should be 404 + expect(last_response.status).to be 404 end end @@ -113,26 +113,26 @@ describe Thrift::ThinHTTPServer::RackApplication do context "200 response" do before do - protocol_factory.stub(:get_protocol) - processor.stub(:process) + allow(protocol_factory).to receive(:get_protocol) + allow(processor).to receive(:process) end it 'creates an IOStreamTransport' do header('Content-Type', "application/x-thrift") - Thrift::IOStreamTransport.should_receive(:new).with(an_instance_of(Rack::Lint::InputWrapper), an_instance_of(Rack::Response)) + expect(Thrift::IOStreamTransport).to receive(:new).with(an_instance_of(Rack::Lint::InputWrapper), an_instance_of(Rack::Response)) post "/" end it 'fetches the right protocol based on the Transport' do header('Content-Type', "application/x-thrift") - 
protocol_factory.should_receive(:get_protocol).with(an_instance_of(Thrift::IOStreamTransport)) + expect(protocol_factory).to receive(:get_protocol).with(an_instance_of(Thrift::IOStreamTransport)) post "/" end it 'status code 200' do header('Content-Type', "application/x-thrift") post "/" - last_response.ok?.should be_true + expect(last_response.ok?).to be_truthy end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/types_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/types_spec.rb index b2c3a200d..364c2a7ec 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/types_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/types_spec.rb @@ -31,85 +31,88 @@ describe Thrift::Types do context 'type checking' do it "should return the proper name for each type" do - Thrift.type_name(Thrift::Types::I16).should == "Types::I16" - Thrift.type_name(Thrift::Types::VOID).should == "Types::VOID" - Thrift.type_name(Thrift::Types::LIST).should == "Types::LIST" - Thrift.type_name(42).should be_nil + expect(Thrift.type_name(Thrift::Types::I16)).to eq("Types::I16") + expect(Thrift.type_name(Thrift::Types::VOID)).to eq("Types::VOID") + expect(Thrift.type_name(Thrift::Types::LIST)).to eq("Types::LIST") + expect(Thrift.type_name(42)).to be_nil end it "should check types properly" do # lambda { Thrift.check_type(nil, Thrift::Types::STOP) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3, {:type => Thrift::Types::STOP}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::VOID}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3, {:type => Thrift::Types::VOID}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(true, {:type => Thrift::Types::BOOL}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3, {:type => Thrift::Types::BOOL}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(42, 
{:type => Thrift::Types::BYTE}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(42, {:type => Thrift::Types::I16}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(42, {:type => Thrift::Types::I32}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(42, {:type => Thrift::Types::I64}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3.14, {:type => Thrift::Types::I32}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3.14, {:type => Thrift::Types::DOUBLE}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3, {:type => Thrift::Types::DOUBLE}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type("3", {:type => Thrift::Types::STRING}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(3, {:type => Thrift::Types::STRING}, :foo) }.should raise_error(Thrift::TypeError) + expect { Thrift.check_type(3, {:type => Thrift::Types::STOP}, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::VOID}, :foo) }.not_to raise_error + expect { Thrift.check_type(3, {:type => Thrift::Types::VOID}, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(true, {:type => Thrift::Types::BOOL}, :foo) }.not_to raise_error + expect { Thrift.check_type(3, {:type => Thrift::Types::BOOL}, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(42, {:type => Thrift::Types::BYTE}, :foo) }.not_to raise_error + expect { Thrift.check_type(42, {:type => Thrift::Types::I16}, :foo) }.not_to raise_error + expect { Thrift.check_type(42, {:type => Thrift::Types::I32}, :foo) }.not_to raise_error + expect { Thrift.check_type(42, {:type => Thrift::Types::I64}, :foo) }.not_to raise_error + expect { Thrift.check_type(3.14, {:type => Thrift::Types::I32}, :foo) }.to raise_error(Thrift::TypeError) + expect { 
Thrift.check_type(3.14, {:type => Thrift::Types::DOUBLE}, :foo) }.not_to raise_error + expect { Thrift.check_type(3, {:type => Thrift::Types::DOUBLE}, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type("3", {:type => Thrift::Types::STRING}, :foo) }.not_to raise_error + expect { Thrift.check_type(3, {:type => Thrift::Types::STRING}, :foo) }.to raise_error(Thrift::TypeError) hello = SpecNamespace::Hello.new - lambda { Thrift.check_type(hello, {:type => Thrift::Types::STRUCT, :class => SpecNamespace::Hello}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type("foo", {:type => Thrift::Types::STRUCT}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type({:foo => 1}, {:type => Thrift::Types::MAP}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type([1], {:type => Thrift::Types::MAP}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type([1], {:type => Thrift::Types::LIST}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type({:foo => 1}, {:type => Thrift::Types::LIST}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(Set.new([1,2]), {:type => Thrift::Types::SET}, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type([1,2], {:type => Thrift::Types::SET}, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type({:foo => true}, {:type => Thrift::Types::SET}, :foo) }.should raise_error(Thrift::TypeError) + expect { Thrift.check_type(hello, {:type => Thrift::Types::STRUCT, :class => SpecNamespace::Hello}, :foo) }.not_to raise_error + expect { Thrift.check_type("foo", {:type => Thrift::Types::STRUCT}, :foo) }.to raise_error(Thrift::TypeError) + field = {:type => Thrift::Types::MAP, :key => {:type => Thrift::Types::I32}, :value => {:type => Thrift::Types::STRING}} + expect { Thrift.check_type({1 => "one"}, field, :foo) }.not_to raise_error + expect { 
Thrift.check_type([1], field, :foo) }.to raise_error(Thrift::TypeError) + field = {:type => Thrift::Types::LIST, :element => {:type => Thrift::Types::I32}} + expect { Thrift.check_type([1], field, :foo) }.not_to raise_error + expect { Thrift.check_type({:foo => 1}, field, :foo) }.to raise_error(Thrift::TypeError) + field = {:type => Thrift::Types::SET, :element => {:type => Thrift::Types::I32}} + expect { Thrift.check_type(Set.new([1,2]), field, :foo) }.not_to raise_error + expect { Thrift.check_type([1,2], field, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type({:foo => true}, field, :foo) }.to raise_error(Thrift::TypeError) end it "should error out if nil is passed and skip_types is false" do - lambda { Thrift.check_type(nil, {:type => Thrift::Types::BOOL}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::BYTE}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::I16}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::I32}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::I64}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::DOUBLE}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::STRING}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::STRUCT}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::LIST}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::SET}, :foo, false) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(nil, {:type => Thrift::Types::MAP}, :foo, false) 
}.should raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::BOOL}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::BYTE}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::I16}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::I32}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::I64}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::DOUBLE}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::STRING}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::STRUCT}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::LIST}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::SET}, :foo, false) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(nil, {:type => Thrift::Types::MAP}, :foo, false) }.to raise_error(Thrift::TypeError) end it "should check element types on containers" do field = {:type => Thrift::Types::LIST, :element => {:type => Thrift::Types::I32}} - lambda { Thrift.check_type([1, 2], field, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type([1, nil, 2], field, :foo) }.should raise_error(Thrift::TypeError) + expect { Thrift.check_type([1, 2], field, :foo) }.not_to raise_error + expect { Thrift.check_type([1, nil, 2], field, :foo) }.to raise_error(Thrift::TypeError) field = {:type => Thrift::Types::MAP, :key => {:type => Thrift::Types::I32}, :value => {:type => Thrift::Types::STRING}} - lambda { Thrift.check_type({1 => "one", 2 => "two"}, field, :foo) }.should_not 
raise_error(Thrift::TypeError) - lambda { Thrift.check_type({1 => "one", nil => "nil"}, field, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type({1 => nil, 2 => "two"}, field, :foo) }.should raise_error(Thrift::TypeError) + expect { Thrift.check_type({1 => "one", 2 => "two"}, field, :foo) }.not_to raise_error + expect { Thrift.check_type({1 => "one", nil => "nil"}, field, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type({1 => nil, 2 => "two"}, field, :foo) }.to raise_error(Thrift::TypeError) field = {:type => Thrift::Types::SET, :element => {:type => Thrift::Types::I32}} - lambda { Thrift.check_type(Set.new([1, 2]), field, :foo) }.should_not raise_error(Thrift::TypeError) - lambda { Thrift.check_type(Set.new([1, nil, 2]), field, :foo) }.should raise_error(Thrift::TypeError) - lambda { Thrift.check_type(Set.new([1, 2.3, 2]), field, :foo) }.should raise_error(Thrift::TypeError) + expect { Thrift.check_type(Set.new([1, 2]), field, :foo) }.not_to raise_error + expect { Thrift.check_type(Set.new([1, nil, 2]), field, :foo) }.to raise_error(Thrift::TypeError) + expect { Thrift.check_type(Set.new([1, 2.3, 2]), field, :foo) }.to raise_error(Thrift::TypeError) field = {:type => Thrift::Types::STRUCT, :class => SpecNamespace::Hello} - lambda { Thrift.check_type(SpecNamespace::BoolStruct, field, :foo) }.should raise_error(Thrift::TypeError) + expect { Thrift.check_type(SpecNamespace::BoolStruct, field, :foo) }.to raise_error(Thrift::TypeError) end it "should give the Thrift::TypeError a readable message" do msg = "Expected Types::STRING, received Fixnum for field foo" - lambda { Thrift.check_type(3, {:type => Thrift::Types::STRING}, :foo) }.should raise_error(Thrift::TypeError, msg) + expect { Thrift.check_type(3, {:type => Thrift::Types::STRING}, :foo) }.to raise_error(Thrift::TypeError, msg) msg = "Expected Types::STRING, received Fixnum for field foo.element" field = {:type => Thrift::Types::LIST, :element => {:type => 
Thrift::Types::STRING}} - lambda { Thrift.check_type([3], field, :foo) }.should raise_error(Thrift::TypeError, msg) + expect { Thrift.check_type([3], field, :foo) }.to raise_error(Thrift::TypeError, msg) msg = "Expected Types::I32, received NilClass for field foo.element.key" field = {:type => Thrift::Types::LIST, :element => {:type => Thrift::Types::MAP, :key => {:type => Thrift::Types::I32}, :value => {:type => Thrift::Types::I32}}} - lambda { Thrift.check_type([{nil => 3}], field, :foo) }.should raise_error(Thrift::TypeError, msg) + expect { Thrift.check_type([{nil => 3}], field, :foo) }.to raise_error(Thrift::TypeError, msg) msg = "Expected Types::I32, received NilClass for field foo.element.value" - lambda { Thrift.check_type([{1 => nil}], field, :foo) }.should raise_error(Thrift::TypeError, msg) + expect { Thrift.check_type([{1 => nil}], field, :foo) }.to raise_error(Thrift::TypeError, msg) end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/union_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/union_spec.rb index 6ad31940c..0ce630629 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/union_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/union_spec.rb @@ -24,85 +24,85 @@ describe 'Union' do describe Thrift::Union do it "should return nil value in unset union" do union = SpecNamespace::My_union.new - union.get_set_field.should == nil - union.get_value.should == nil + expect(union.get_set_field).to eq(nil) + expect(union.get_value).to eq(nil) end it "should set a field and be accessible through get_value and the named field accessor" do union = SpecNamespace::My_union.new union.integer32 = 25 - union.get_set_field.should == :integer32 - union.get_value.should == 25 - union.integer32.should == 25 + expect(union.get_set_field).to eq(:integer32) + expect(union.get_value).to eq(25) + expect(union.integer32).to eq(25) end it "should work correctly when instantiated with static field constructors" do union = 
SpecNamespace::My_union.integer32(5) - union.get_set_field.should == :integer32 - union.integer32.should == 5 + expect(union.get_set_field).to eq(:integer32) + expect(union.integer32).to eq(5) end it "should raise for wrong set field" do union = SpecNamespace::My_union.new union.integer32 = 25 - lambda { union.some_characters }.should raise_error(RuntimeError, "some_characters is not union's set field.") + expect { union.some_characters }.to raise_error(RuntimeError, "some_characters is not union's set field.") end it "should raise for wrong set field when hash initialized and type checking is off" do Thrift.type_checking = false union = SpecNamespace::My_union.new({incorrect_field: :incorrect}) example = lambda { Thrift::Serializer.new.serialize(union) } - example.should raise_error(RuntimeError, "set_field is not valid for this union!") + expect(example).to raise_error(RuntimeError, "set_field is not valid for this union!") end it "should not be equal to nil" do union = SpecNamespace::My_union.new - union.should_not == nil + expect(union).not_to eq(nil) end it "should not be equal with an empty String" do union = SpecNamespace::My_union.new - union.should_not == '' + expect(union).not_to eq('') end it "should not equate two different unions, i32 vs. string" do union = SpecNamespace::My_union.new(:integer32, 25) other_union = SpecNamespace::My_union.new(:some_characters, "blah!") - union.should_not == other_union + expect(union).not_to eq(other_union) end it "should properly reset setfield and setvalue" do union = SpecNamespace::My_union.new(:integer32, 25) - union.get_set_field.should == :integer32 + expect(union.get_set_field).to eq(:integer32) union.some_characters = "blah!" - union.get_set_field.should == :some_characters - union.get_value.should == "blah!" 
- lambda { union.integer32 }.should raise_error(RuntimeError, "integer32 is not union's set field.") + expect(union.get_set_field).to eq(:some_characters) + expect(union.get_value).to eq("blah!") + expect { union.integer32 }.to raise_error(RuntimeError, "integer32 is not union's set field.") end it "should not equate two different unions with different values" do union = SpecNamespace::My_union.new(:integer32, 25) other_union = SpecNamespace::My_union.new(:integer32, 400) - union.should_not == other_union + expect(union).not_to eq(other_union) end it "should not equate two different unions with different fields" do union = SpecNamespace::My_union.new(:integer32, 25) other_union = SpecNamespace::My_union.new(:other_i32, 25) - union.should_not == other_union + expect(union).not_to eq(other_union) end it "should inspect properly" do union = SpecNamespace::My_union.new(:integer32, 25) - union.inspect.should == "" + expect(union.inspect).to eq("") end it "should not allow setting with instance_variable_set" do union = SpecNamespace::My_union.new(:integer32, 27) union.instance_variable_set(:@some_characters, "hallo!") - union.get_set_field.should == :integer32 - union.get_value.should == 27 - lambda { union.some_characters }.should raise_error(RuntimeError, "some_characters is not union's set field.") + expect(union.get_set_field).to eq(:integer32) + expect(union.get_value).to eq(27) + expect { union.some_characters }.to raise_error(RuntimeError, "some_characters is not union's set field.") end it "should serialize to binary correctly" do @@ -114,7 +114,7 @@ describe 'Union' do other_union = SpecNamespace::My_union.new(:integer32, 25) other_union.read(proto) - other_union.should == union + expect(other_union).to eq(union) end it "should serialize to json correctly" do @@ -126,24 +126,24 @@ describe 'Union' do other_union = SpecNamespace::My_union.new(:integer32, 25) other_union.read(proto) - other_union.should == union + expect(other_union).to eq(union) end it "should 
raise when validating unset union" do union = SpecNamespace::My_union.new - lambda { union.validate }.should raise_error(StandardError, "Union fields are not set.") + expect { union.validate }.to raise_error(StandardError, "Union fields are not set.") other_union = SpecNamespace::My_union.new(:integer32, 1) - lambda { other_union.validate }.should_not raise_error(StandardError, "Union fields are not set.") + expect { other_union.validate }.not_to raise_error end it "should validate an enum field properly" do union = SpecNamespace::TestUnion.new(:enum_field, 3) - union.get_set_field.should == :enum_field - lambda { union.validate }.should raise_error(Thrift::ProtocolException, "Invalid value of field enum_field!") + expect(union.get_set_field).to eq(:enum_field) + expect { union.validate }.to raise_error(Thrift::ProtocolException, "Invalid value of field enum_field!") other_union = SpecNamespace::TestUnion.new(:enum_field, 1) - lambda { other_union.validate }.should_not raise_error(Thrift::ProtocolException, "Invalid value of field enum_field!") + expect { other_union.validate }.not_to raise_error end it "should properly serialize and match structs with a union" do @@ -158,37 +158,37 @@ describe 'Union' do other_union = SpecNamespace::My_union.new(:some_characters, "hello there") swu2 = SpecNamespace::Struct_with_union.new(:fun_union => other_union) - swu2.should_not == swu + expect(swu2).not_to eq(swu) swu2.read(proto) - swu2.should == swu + expect(swu2).to eq(swu) end it "should support old style constructor" do union = SpecNamespace::My_union.new(:integer32 => 26) - union.get_set_field.should == :integer32 - union.get_value.should == 26 + expect(union.get_set_field).to eq(:integer32) + expect(union.get_value).to eq(26) end it "should not throw an error when inspected and unset" do - lambda{SpecNamespace::TestUnion.new().inspect}.should_not raise_error + expect{SpecNamespace::TestUnion.new().inspect}.not_to raise_error end it "should print enum value name when 
inspected" do - SpecNamespace::My_union.new(:some_enum => SpecNamespace::SomeEnum::ONE).inspect.should == "" + expect(SpecNamespace::My_union.new(:some_enum => SpecNamespace::SomeEnum::ONE).inspect).to eq("") - SpecNamespace::My_union.new(:my_map => {SpecNamespace::SomeEnum::ONE => [SpecNamespace::SomeEnum::TWO]}).inspect.should == "" + expect(SpecNamespace::My_union.new(:my_map => {SpecNamespace::SomeEnum::ONE => [SpecNamespace::SomeEnum::TWO]}).inspect).to eq("") end it "should offer field? methods" do - SpecNamespace::My_union.new.some_enum?.should be_false - SpecNamespace::My_union.new(:some_enum => SpecNamespace::SomeEnum::ONE).some_enum?.should be_true - SpecNamespace::My_union.new(:im_true => false).im_true?.should be_true - SpecNamespace::My_union.new(:im_true => true).im_true?.should be_true + expect(SpecNamespace::My_union.new.some_enum?).to be_falsey + expect(SpecNamespace::My_union.new(:some_enum => SpecNamespace::SomeEnum::ONE).some_enum?).to be_truthy + expect(SpecNamespace::My_union.new(:im_true => false).im_true?).to be_truthy + expect(SpecNamespace::My_union.new(:im_true => true).im_true?).to be_truthy end it "should pretty print binary fields" do - SpecNamespace::TestUnion.new(:binary_field => "\001\002\003").inspect.should == "" + expect(SpecNamespace::TestUnion.new(:binary_field => "\001\002\003").inspect).to eq("") end it "should be comparable" do @@ -207,7 +207,7 @@ describe 'Union' do for y in 0..3 for x in 0..3 # puts "#{objs[y].inspect} <=> #{objs[x].inspect} should == #{relationships[y][x]}" - (objs[y] <=> objs[x]).should == relationships[y][x] + expect(objs[y] <=> objs[x]).to eq(relationships[y][x]) end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/spec/unix_socket_spec.rb b/vendor/git.apache.org/thrift.git/lib/rb/spec/unix_socket_spec.rb index cb6cff3f9..8623e95a0 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/spec/unix_socket_spec.rb +++ b/vendor/git.apache.org/thrift.git/lib/rb/spec/unix_socket_spec.rb @@ -26,21 
+26,26 @@ describe 'UNIXSocket' do before(:each) do @path = '/tmp/thrift_spec_socket' @socket = Thrift::UNIXSocket.new(@path) - @handle = mock("Handle", :closed? => false) - @handle.stub!(:close) - ::UNIXSocket.stub!(:new).and_return(@handle) + @handle = double("Handle", :closed? => false) + allow(@handle).to receive(:close) + allow(::UNIXSocket).to receive(:new).and_return(@handle) end it_should_behave_like "a socket" it "should raise a TransportException when it cannot open a socket" do - ::UNIXSocket.should_receive(:new).and_raise(StandardError) - lambda { @socket.open }.should raise_error(Thrift::TransportException) { |e| e.type.should == Thrift::TransportException::NOT_OPEN } + expect(::UNIXSocket).to receive(:new).and_raise(StandardError) + expect { @socket.open }.to raise_error(Thrift::TransportException) { |e| expect(e.type).to eq(Thrift::TransportException::NOT_OPEN) } end it "should accept an optional timeout" do - ::UNIXSocket.stub!(:new) - Thrift::UNIXSocket.new(@path, 5).timeout.should == 5 + allow(::UNIXSocket).to receive(:new) + expect(Thrift::UNIXSocket.new(@path, 5).timeout).to eq(5) + end + + it "should provide a reasonable to_s" do + allow(::UNIXSocket).to receive(:new) + expect(Thrift::UNIXSocket.new(@path).to_s).to eq("domain(#{@path})") end end @@ -51,57 +56,61 @@ describe 'UNIXSocket' do end it "should create a handle when calling listen" do - UNIXServer.should_receive(:new).with(@path) + expect(UNIXServer).to receive(:new).with(@path) @socket.listen end it "should create a Thrift::UNIXSocket to wrap accepted sockets" do - handle = mock("UNIXServer") - UNIXServer.should_receive(:new).with(@path).and_return(handle) + handle = double("UNIXServer") + expect(UNIXServer).to receive(:new).with(@path).and_return(handle) @socket.listen - sock = mock("sock") - handle.should_receive(:accept).and_return(sock) - trans = mock("UNIXSocket") - Thrift::UNIXSocket.should_receive(:new).and_return(trans) - trans.should_receive(:handle=).with(sock) - 
@socket.accept.should == trans + sock = double("sock") + expect(handle).to receive(:accept).and_return(sock) + trans = double("UNIXSocket") + expect(Thrift::UNIXSocket).to receive(:new).and_return(trans) + expect(trans).to receive(:handle=).with(sock) + expect(@socket.accept).to eq(trans) end it "should close the handle when closed" do - handle = mock("UNIXServer", :closed? => false) - UNIXServer.should_receive(:new).with(@path).and_return(handle) + handle = double("UNIXServer", :closed? => false) + expect(UNIXServer).to receive(:new).with(@path).and_return(handle) @socket.listen - handle.should_receive(:close) - File.stub!(:delete) + expect(handle).to receive(:close) + allow(File).to receive(:delete) @socket.close end it "should delete the socket when closed" do - handle = mock("UNIXServer", :closed? => false) - UNIXServer.should_receive(:new).with(@path).and_return(handle) + handle = double("UNIXServer", :closed? => false) + expect(UNIXServer).to receive(:new).with(@path).and_return(handle) @socket.listen - handle.stub!(:close) - File.should_receive(:delete).with(@path) + allow(handle).to receive(:close) + expect(File).to receive(:delete).with(@path) @socket.close end it "should return nil when accepting if there is no handle" do - @socket.accept.should be_nil + expect(@socket.accept).to be_nil end it "should return true for closed? when appropriate" do - handle = mock("UNIXServer", :closed? => false) - UNIXServer.stub!(:new).and_return(handle) - File.stub!(:delete) + handle = double("UNIXServer", :closed? 
=> false) + allow(UNIXServer).to receive(:new).and_return(handle) + allow(File).to receive(:delete) @socket.listen - @socket.should_not be_closed - handle.stub!(:close) + expect(@socket).not_to be_closed + allow(handle).to receive(:close) @socket.close - @socket.should be_closed + expect(@socket).to be_closed @socket.listen - @socket.should_not be_closed - handle.stub!(:closed?).and_return(true) - @socket.should be_closed + expect(@socket).not_to be_closed + allow(handle).to receive(:closed?).and_return(true) + expect(@socket).to be_closed + end + + it "should provide a reasonable to_s" do + expect(@socket.to_s).to eq("domain(#{@path})") end end end diff --git a/vendor/git.apache.org/thrift.git/lib/rb/thrift.gemspec b/vendor/git.apache.org/thrift.git/lib/rb/thrift.gemspec index 291b81fbc..fcc344f48 100644 --- a/vendor/git.apache.org/thrift.git/lib/rb/thrift.gemspec +++ b/vendor/git.apache.org/thrift.git/lib/rb/thrift.gemspec @@ -3,14 +3,14 @@ $:.push File.expand_path("../lib", __FILE__) Gem::Specification.new do |s| s.name = 'thrift' - s.version = '0.11.0.0' + s.version = '1.0.0.0' s.authors = ['Thrift Developers'] s.email = ['dev@thrift.apache.org'] s.homepage = 'http://thrift.apache.org' s.summary = %q{Ruby bindings for Apache Thrift} s.description = %q{Ruby bindings for the Apache Thrift RPC system} - s.license = 'Apache 2.0' - s.extensions = ['ext/extconf.rb'] + s.license = 'Apache-2.0' + s.extensions = ['ext/extconf.rb'] s.has_rdoc = true s.rdoc_options = %w[--line-numbers --inline-source --title Thrift --main README] @@ -27,11 +27,14 @@ Gem::Specification.new do |s| s.require_paths = %w[lib ext] - s.add_development_dependency 'rspec', ['>= 2.10.0', '< 2.14.0'] - s.add_development_dependency "rack", "~> 1.5" - s.add_development_dependency "rack-test", "~> 0.6" - s.add_development_dependency "thin", "~> 1.5" - s.add_development_dependency "bundler", "~> 1" - s.add_development_dependency 'rake', '~> 10.5' + s.add_development_dependency 'bundler', '~> 1.11' + 
s.add_development_dependency 'pry', '~> 0.11.3' + s.add_development_dependency 'pry-byebug', '~> 3.6' + s.add_development_dependency 'pry-stack_explorer', '~> 0.4.9.2' + s.add_development_dependency 'rack', '~> 2.0' + s.add_development_dependency 'rack-test', '~> 0.8.3' + s.add_development_dependency 'rake', '~> 12.3' + s.add_development_dependency 'rspec', '~> 3.5' + s.add_development_dependency 'thin', '~> 1.7' end diff --git a/vendor/git.apache.org/thrift.git/lib/rs/Cargo.toml b/vendor/git.apache.org/thrift.git/lib/rs/Cargo.toml index 8e68175cf..f5844b42a 100644 --- a/vendor/git.apache.org/thrift.git/lib/rs/Cargo.toml +++ b/vendor/git.apache.org/thrift.git/lib/rs/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "thrift" description = "Rust bindings for the Apache Thrift RPC system" -version = "0.11.0" +version = "1.0.0" license = "Apache-2.0" authors = ["Apache Thrift Developers "] homepage = "http://thrift.apache.org" @@ -11,7 +11,7 @@ exclude = ["Makefile*", "test/**"] keywords = ["thrift"] [dependencies] -byteorder = "~1.1.0" +byteorder = "~1.2.1" integer-encoding = "~1.0.4" log = "~0.3.8" threadpool = "~1.7.1" diff --git a/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/binary.rs b/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/binary.rs index 171073360..8505b6334 100644 --- a/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/binary.rs +++ b/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/binary.rs @@ -312,13 +312,6 @@ where transport: transport, } } - - fn write_transport(&mut self, buf: &[u8]) -> ::Result<()> { - self.transport - .write(buf) - .map(|_| ()) - .map_err(From::from) - } } impl TOutputProtocol for TBinaryOutputProtocol @@ -384,7 +377,7 @@ where fn write_bytes(&mut self, b: &[u8]) -> ::Result<()> { self.write_i32(b.len() as i32)?; - self.write_transport(b) + self.transport.write_all(b).map_err(From::from) } fn write_bool(&mut self, b: bool) -> ::Result<()> { diff --git 
a/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/compact.rs b/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/compact.rs index dfe11f852..1e67f4931 100644 --- a/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/compact.rs +++ b/vendor/git.apache.org/thrift.git/lib/rs/src/protocol/compact.rs @@ -19,6 +19,7 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use integer_encoding::{VarIntReader, VarIntWriter}; use std::convert::From; use try_from::TryFrom; +use std::io; use transport::{TReadTransport, TWriteTransport}; use super::{TFieldIdentifier, TInputProtocol, TInputProtocolFactory, TListIdentifier, @@ -314,6 +315,16 @@ where } } + +impl io::Seek for TCompactInputProtocol +where + T: io::Seek + TReadTransport, +{ + fn seek(&mut self, pos: io::SeekFrom) -> io::Result { + self.transport.seek(pos) + } +} + /// Factory for creating instances of `TCompactInputProtocol`. #[derive(Default)] pub struct TCompactInputProtocolFactory; diff --git a/vendor/git.apache.org/thrift.git/lib/rs/src/transport/buffered.rs b/vendor/git.apache.org/thrift.git/lib/rs/src/transport/buffered.rs index b588ec1a7..41b941c3e 100644 --- a/vendor/git.apache.org/thrift.git/lib/rs/src/transport/buffered.rs +++ b/vendor/git.apache.org/thrift.git/lib/rs/src/transport/buffered.rs @@ -174,6 +174,7 @@ where C: Write, { buf: Vec, + cap: usize, channel: C, } @@ -191,8 +192,11 @@ where /// `read_capacity` and an internal write buffer of size /// `write_capacity` that wraps the given `TIoChannel`. 
pub fn with_capacity(write_capacity: usize, channel: C) -> TBufferedWriteTransport { + assert!(write_capacity > 0, "write buffer size must be a positive integer"); + TBufferedWriteTransport { buf: Vec::with_capacity(write_capacity), + cap: write_capacity, channel: channel, } } @@ -203,13 +207,28 @@ where C: Write, { fn write(&mut self, buf: &[u8]) -> io::Result { - let avail_bytes = cmp::min(buf.len(), self.buf.capacity() - self.buf.len()); - self.buf.extend_from_slice(&buf[..avail_bytes]); - assert!( - self.buf.len() <= self.buf.capacity(), - "copy overflowed buffer" - ); - Ok(avail_bytes) + if !buf.is_empty() { + let mut avail_bytes; + + loop { + avail_bytes = cmp::min(buf.len(), self.cap - self.buf.len()); + + if avail_bytes == 0 { + self.flush()?; + } else { + break; + } + } + + let avail_bytes = avail_bytes; + + self.buf.extend_from_slice(&buf[..avail_bytes]); + assert!(self.buf.len() <= self.cap, "copy overflowed buffer"); + + Ok(avail_bytes) + } else { + Ok(0) + } } fn flush(&mut self) -> io::Result<()> { @@ -364,14 +383,21 @@ mod tests { } #[test] - fn must_return_zero_if_nothing_can_be_written() { + fn must_return_error_when_nothing_can_be_written_to_underlying_channel() { let mem = TBufferChannel::with_capacity(0, 0); - let mut t = TBufferedWriteTransport::with_capacity(0, mem); + let mut t = TBufferedWriteTransport::with_capacity(1, mem); let b = vec![0; 10]; let r = t.write(&b); - assert_eq!(r.unwrap(), 0); + // should have written 1 byte + assert_eq!(r.unwrap(), 1); + + // let's try again... 
+ let r = t.write(&b[1..]); + + // this time we'll error out because the auto-flush failed + assert!(r.is_err()); } #[test] @@ -387,23 +413,35 @@ mod tests { } #[test] - fn must_return_zero_if_write_buffer_full() { - let mem = TBufferChannel::with_capacity(0, 0); + fn must_auto_flush_if_write_buffer_full() { + let mem = TBufferChannel::with_capacity(0, 8); let mut t = TBufferedWriteTransport::with_capacity(4, mem); - let b = [0x00, 0x01, 0x02, 0x03]; + let b0 = [0x00, 0x01, 0x02, 0x03]; + let b1 = [0x04, 0x05, 0x06, 0x07]; - // we've now filled the write buffer - let r = t.write(&b); + // write the first 4 bytes; we've now filled the transport's write buffer + let r = t.write(&b0); assert_eq!(r.unwrap(), 4); - // try write the same bytes again - nothing should be writable - let r = t.write(&b); - assert_eq!(r.unwrap(), 0); + // try write the next 4 bytes; this causes the transport to auto-flush the first 4 bytes + let r = t.write(&b1); + assert_eq!(r.unwrap(), 4); + + // check that in writing the second 4 bytes we auto-flushed the first 4 bytes + assert_eq_transport_num_written_bytes!(t, 4); + assert_eq_transport_written_bytes!(t, b0); + t.channel.empty_write_buffer(); + + // now flush the transport to push the second 4 bytes to the underlying channel + assert!(t.flush().is_ok()); + + // check that we wrote out the second 4 bytes + assert_eq_transport_written_bytes!(t, b1); } #[test] - fn must_only_write_to_inner_transport_on_flush() { + fn must_write_to_inner_transport_on_flush() { let mem = TBufferChannel::with_capacity(10, 10); let mut t = TBufferedWriteTransport::new(mem); diff --git a/vendor/git.apache.org/thrift.git/lib/rs/src/transport/framed.rs b/vendor/git.apache.org/thrift.git/lib/rs/src/transport/framed.rs index d78d2f7a1..7e0f8b6c3 100644 --- a/vendor/git.apache.org/thrift.git/lib/rs/src/transport/framed.rs +++ b/vendor/git.apache.org/thrift.git/lib/rs/src/transport/framed.rs @@ -18,7 +18,7 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use 
std::cmp; use std::io; -use std::io::{ErrorKind, Read, Write}; +use std::io::{Read, Write}; use super::{TReadTransport, TReadTransportFactory, TWriteTransport, TWriteTransportFactory}; @@ -57,7 +57,7 @@ pub struct TFramedReadTransport where C: Read, { - buf: Box<[u8]>, + buf: Vec, pos: usize, cap: usize, chan: C, @@ -67,18 +67,17 @@ impl TFramedReadTransport where C: Read, { - /// Create a `TFramedTransport` with default-sized internal read and - /// write buffers that wraps the given `TIoChannel`. + /// Create a `TFramedReadTransport` with a default-sized + /// internal read buffer that wraps the given `TIoChannel`. pub fn new(channel: C) -> TFramedReadTransport { TFramedReadTransport::with_capacity(READ_CAPACITY, channel) } - /// Create a `TFramedTransport` with an internal read buffer of size - /// `read_capacity` and an internal write buffer of size - /// `write_capacity` that wraps the given `TIoChannel`. + /// Create a `TFramedTransport` with an internal read buffer + /// of size `read_capacity` that wraps the given `TIoChannel`. pub fn with_capacity(read_capacity: usize, channel: C) -> TFramedReadTransport { TFramedReadTransport { - buf: vec![0; read_capacity].into_boxed_slice(), + buf: vec![0; read_capacity], // FIXME: do I actually have to do this? pos: 0, cap: 0, chan: channel, @@ -93,22 +92,13 @@ where fn read(&mut self, b: &mut [u8]) -> io::Result { if self.cap - self.pos == 0 { let message_size = self.chan.read_i32::()? 
as usize; - if message_size > self.buf.len() { - return Err( - io::Error::new( - ErrorKind::Other, - format!( - "bytes to be read ({}) exceeds buffer \ - capacity ({})", - message_size, - self.buf.len() - ), - ), - ); - } + + let buf_capacity = cmp::max(message_size, READ_CAPACITY); + self.buf.resize(buf_capacity, 0); + self.chan.read_exact(&mut self.buf[..message_size])?; - self.pos = 0; self.cap = message_size as usize; + self.pos = 0; } let nread = cmp::min(b.len(), self.cap - self.pos); @@ -165,8 +155,7 @@ pub struct TFramedWriteTransport where C: Write, { - buf: Box<[u8]>, - pos: usize, + buf: Vec, channel: C, } @@ -174,20 +163,18 @@ impl TFramedWriteTransport where C: Write, { - /// Create a `TFramedTransport` with default-sized internal read and - /// write buffers that wraps the given `TIoChannel`. + /// Create a `TFramedWriteTransport` with default-sized internal + /// write buffer that wraps the given `TIoChannel`. pub fn new(channel: C) -> TFramedWriteTransport { TFramedWriteTransport::with_capacity(WRITE_CAPACITY, channel) } - /// Create a `TFramedTransport` with an internal read buffer of size - /// `read_capacity` and an internal write buffer of size - /// `write_capacity` that wraps the given `TIoChannel`. + /// Create a `TFramedWriteTransport` with an internal write buffer + /// of size `write_capacity` that wraps the given `TIoChannel`. 
pub fn with_capacity(write_capacity: usize, channel: C) -> TFramedWriteTransport { TFramedWriteTransport { - buf: vec![0; write_capacity].into_boxed_slice(), - pos: 0, - channel: channel, + buf: Vec::with_capacity(write_capacity), + channel, } } } @@ -197,28 +184,19 @@ where C: Write, { fn write(&mut self, b: &[u8]) -> io::Result { - if b.len() > (self.buf.len() - self.pos) { - return Err( - io::Error::new( - ErrorKind::Other, - format!( - "bytes to be written ({}) exceeds buffer \ - capacity ({})", - b.len(), - self.buf.len() - self.pos - ), - ), - ); + let current_capacity = self.buf.capacity(); + let available_space = current_capacity - self.buf.len(); + if b.len() > available_space { + let additional_space = cmp::max(b.len() - available_space, current_capacity); + self.buf.reserve(additional_space); } - let nwrite = b.len(); // always less than available write buffer capacity - self.buf[self.pos..(self.pos + nwrite)].clone_from_slice(b); - self.pos += nwrite; - Ok(nwrite) + self.buf.extend_from_slice(b); + Ok(b.len()) } fn flush(&mut self) -> io::Result<()> { - let message_size = self.pos; + let message_size = self.buf.len(); if let 0 = message_size { return Ok(()); @@ -227,13 +205,17 @@ where .write_i32::(message_size as i32)?; } + // will spin if the underlying channel can't be written to let mut byte_index = 0; - while byte_index < self.pos { - let nwrite = self.channel.write(&self.buf[byte_index..self.pos])?; - byte_index = cmp::min(byte_index + nwrite, self.pos); + while byte_index < message_size { + let nwrite = self.channel.write(&self.buf[byte_index..message_size])?; + byte_index = cmp::min(byte_index + nwrite, message_size); } - self.pos = 0; + let buf_capacity = cmp::min(self.buf.capacity(), WRITE_CAPACITY); + self.buf.resize(buf_capacity, 0); + self.buf.clear(); + self.channel.flush() } } @@ -257,8 +239,230 @@ impl TWriteTransportFactory for TFramedWriteTransportFactory { #[cfg(test)] mod tests { - // use std::io::{Read, Write}; - // - // use 
super::*; - // use ::transport::mem::TBufferChannel; + use super::*; + use ::transport::mem::TBufferChannel; + + // FIXME: test a forced reserve + + #[test] + fn must_read_message_smaller_than_initial_buffer_size() { + let c = TBufferChannel::with_capacity(10, 10); + let mut t = TFramedReadTransport::with_capacity(8, c); + + t.chan.set_readable_bytes( + &[ + 0x00, 0x00, 0x00, 0x04, /* message size */ + 0x00, 0x01, 0x02, 0x03 /* message body */ + ] + ); + + let mut buf = vec![0; 8]; + + // we've read exactly 4 bytes + assert_eq!(t.read(&mut buf).unwrap(), 4); + assert_eq!(&buf[..4], &[0x00, 0x01, 0x02, 0x03]); + } + + #[test] + fn must_read_message_greater_than_initial_buffer_size() { + let c = TBufferChannel::with_capacity(10, 10); + let mut t = TFramedReadTransport::with_capacity(2, c); + + t.chan.set_readable_bytes( + &[ + 0x00, 0x00, 0x00, 0x04, /* message size */ + 0x00, 0x01, 0x02, 0x03 /* message body */ + ] + ); + + let mut buf = vec![0; 8]; + + // we've read exactly 4 bytes + assert_eq!(t.read(&mut buf).unwrap(), 4); + assert_eq!(&buf[..4], &[0x00, 0x01, 0x02, 0x03]); + } + + #[test] + fn must_read_multiple_messages_in_sequence_correctly() { + let c = TBufferChannel::with_capacity(10, 10); + let mut t = TFramedReadTransport::with_capacity(2, c); + + // + // 1st message + // + + t.chan.set_readable_bytes( + &[ + 0x00, 0x00, 0x00, 0x04, /* message size */ + 0x00, 0x01, 0x02, 0x03 /* message body */ + ] + ); + + let mut buf = vec![0; 8]; + + // we've read exactly 4 bytes + assert_eq!(t.read(&mut buf).unwrap(), 4); + assert_eq!(&buf, &[0x00, 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00]); + + // + // 2nd message + // + + t.chan.set_readable_bytes( + &[ + 0x00, 0x00, 0x00, 0x01, /* message size */ + 0x04 /* message body */ + ] + ); + + let mut buf = vec![0; 8]; + + // we've read exactly 1 byte + assert_eq!(t.read(&mut buf).unwrap(), 1); + assert_eq!(&buf, &[0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]); + } + + #[test] + fn 
must_write_message_smaller_than_buffer_size() { + let mem = TBufferChannel::with_capacity(0, 0); + let mut t = TFramedWriteTransport::with_capacity(20, mem); + + let b = vec![0; 10]; + + // should have written 10 bytes + assert_eq!(t.write(&b).unwrap(), 10); + } + + #[test] + fn must_return_zero_if_caller_calls_write_with_empty_buffer() { + let mem = TBufferChannel::with_capacity(0, 10); + let mut t = TFramedWriteTransport::with_capacity(10, mem); + + let expected: [u8; 0] = []; + + assert_eq!(t.write(&[]).unwrap(), 0); + assert_eq_transport_written_bytes!(t, expected); + } + + #[test] + fn must_write_to_inner_transport_on_flush() { + let mem = TBufferChannel::with_capacity(10, 10); + let mut t = TFramedWriteTransport::new(mem); + + let b: [u8; 5] = [0x00, 0x01, 0x02, 0x03, 0x04]; + assert_eq!(t.write(&b).unwrap(), 5); + assert_eq_transport_num_written_bytes!(t, 0); + + assert!(t.flush().is_ok()); + + let expected_bytes = [ + 0x00, 0x00, 0x00, 0x05, /* message size */ + 0x00, 0x01, 0x02, 0x03, 0x04 /* message body */ + ]; + + assert_eq_transport_written_bytes!(t, expected_bytes); + } + + #[test] + fn must_write_message_greater_than_buffer_size_00() { + let mem = TBufferChannel::with_capacity(0, 10); + + // IMPORTANT: DO **NOT** CHANGE THE WRITE_CAPACITY OR THE NUMBER OF BYTES TO BE WRITTEN! 
+ // these lengths were chosen to be just long enough + // that doubling the capacity is a **worse** choice than + // simply resizing the buffer to b.len() + + let mut t = TFramedWriteTransport::with_capacity(1, mem); + let b = [0x00, 0x01, 0x02]; + + // should have written 3 bytes + assert_eq!(t.write(&b).unwrap(), 3); + assert_eq_transport_num_written_bytes!(t, 0); + + assert!(t.flush().is_ok()); + + let expected_bytes = [ + 0x00, 0x00, 0x00, 0x03, /* message size */ + 0x00, 0x01, 0x02 /* message body */ + ]; + + assert_eq_transport_written_bytes!(t, expected_bytes); + } + + #[test] + fn must_write_message_greater_than_buffer_size_01() { + let mem = TBufferChannel::with_capacity(0, 10); + + // IMPORTANT: DO **NOT** CHANGE THE WRITE_CAPACITY OR THE NUMBER OF BYTES TO BE WRITTEN! + // these lengths were chosen to be just long enough + // that doubling the capacity is a **better** choice than + // simply resizing the buffer to b.len() + + let mut t = TFramedWriteTransport::with_capacity(2, mem); + let b = [0x00, 0x01, 0x02]; + + // should have written 3 bytes + assert_eq!(t.write(&b).unwrap(), 3); + assert_eq_transport_num_written_bytes!(t, 0); + + assert!(t.flush().is_ok()); + + let expected_bytes = [ + 0x00, 0x00, 0x00, 0x03, /* message size */ + 0x00, 0x01, 0x02 /* message body */ + ]; + + assert_eq_transport_written_bytes!(t, expected_bytes); + } + + #[test] + fn must_return_error_if_nothing_can_be_written_to_inner_transport_on_flush() { + let mem = TBufferChannel::with_capacity(0, 0); + let mut t = TFramedWriteTransport::with_capacity(1, mem); + + let b = vec![0; 10]; + + // should have written 10 bytes + assert_eq!(t.write(&b).unwrap(), 10); + + // let's flush + let r = t.flush(); + + // this time we'll error out because the flush can't write to the underlying channel + assert!(r.is_err()); + } + + #[test] + fn must_write_successfully_after_flush() { + // IMPORTANT: write capacity *MUST* be greater + // than message sizes used in this test + 4-byte frame 
header + let mem = TBufferChannel::with_capacity(0, 10); + let mut t = TFramedWriteTransport::with_capacity(5, mem); + + // write and flush + let first_message: [u8; 5] = [0x00, 0x01, 0x02, 0x03, 0x04]; + assert_eq!(t.write(&first_message).unwrap(), 5); + assert!(t.flush().is_ok()); + + let mut expected = Vec::new(); + expected.write_all(&[0x00, 0x00, 0x00, 0x05]).unwrap(); // message size + expected.extend_from_slice(&first_message); + + // check the flushed bytes + assert_eq!(t.channel.write_bytes(), expected); + + // reset our underlying transport + t.channel.empty_write_buffer(); + + let second_message: [u8; 3] = [0x05, 0x06, 0x07]; + assert_eq!(t.write(&second_message).unwrap(), 3); + assert!(t.flush().is_ok()); + + expected.clear(); + expected.write_all(&[0x00, 0x00, 0x00, 0x03]).unwrap(); // message size + expected.extend_from_slice(&second_message); + + // check the flushed bytes + assert_eq!(t.channel.write_bytes(), expected); + } } diff --git a/vendor/git.apache.org/thrift.git/lib/rs/src/transport/socket.rs b/vendor/git.apache.org/thrift.git/lib/rs/src/transport/socket.rs index 727bba3f5..a6f780ac8 100644 --- a/vendor/git.apache.org/thrift.git/lib/rs/src/transport/socket.rs +++ b/vendor/git.apache.org/thrift.git/lib/rs/src/transport/socket.rs @@ -156,7 +156,7 @@ impl Read for TTcpChannel { impl Write for TTcpChannel { fn write(&mut self, b: &[u8]) -> io::Result { - self.if_set(|s| s.write_all(b)).map(|_| b.len()) + self.if_set(|s| s.write(b)) } fn flush(&mut self) -> io::Result<()> { diff --git a/vendor/git.apache.org/thrift.git/lib/st/package.xml b/vendor/git.apache.org/thrift.git/lib/st/package.xml index 5044e2aa4..72194fc93 100644 --- a/vendor/git.apache.org/thrift.git/lib/st/package.xml +++ b/vendor/git.apache.org/thrift.git/lib/st/package.xml @@ -17,7 +17,7 @@ specific language governing permissions and limitations under the License. 
--> - + libthrift-st thrift.st diff --git a/vendor/git.apache.org/thrift.git/package-lock.json b/vendor/git.apache.org/thrift.git/package-lock.json new file mode 100644 index 000000000..27bd5e92d --- /dev/null +++ b/vendor/git.apache.org/thrift.git/package-lock.json @@ -0,0 +1,2028 @@ +{ + "name": "thrift", + "version": "1.0.0-dev", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "abbrev": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz", + "integrity": "sha1-kbR5JYinc4wl813W9jdSovh3YTU=", + "dev": true + }, + "ajv": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", + "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=", + "dev": true, + "requires": { + "co": "4.6.0", + "fast-deep-equal": "1.1.0", + "fast-json-stable-stringify": "2.0.0", + "json-schema-traverse": "0.3.1" + } + }, + "align-text": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/align-text/-/align-text-0.1.4.tgz", + "integrity": "sha1-DNkKVhCT810KmSVsIrcGlDP60Rc=", + "dev": true, + "requires": { + "kind-of": "3.2.2", + "longest": "1.0.1", + "repeat-string": "1.6.1" + } + }, + "amdefine": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", + "integrity": "sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=", + "dev": true + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "aproba": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", + "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==", + "dev": true + }, + "are-we-there-yet": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz", + "integrity": "sha1-u13KOCu5TwXhUZQ3PRb9O6HKEQ0=", + "dev": true, + "requires": { + 
"delegates": "1.0.0", + "readable-stream": "2.3.5" + } + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "1.0.3" + } + }, + "asn1": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.3.tgz", + "integrity": "sha1-2sh4dxPJlmhJ/IGAd36+nB3fO4Y=", + "dev": true + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "dev": true + }, + "async": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/async/-/async-1.5.2.tgz", + "integrity": "sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo=", + "dev": true + }, + "async-limiter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz", + "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==" + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", + "dev": true + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "dev": true + }, + "aws4": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.6.0.tgz", + "integrity": "sha1-g+9cqGCysy5KDe7e6MdxudtXRx4=", + "dev": true + }, + "babylon": { + "version": "7.0.0-beta.19", + "resolved": "https://registry.npmjs.org/babylon/-/babylon-7.0.0-beta.19.tgz", + "integrity": "sha512-Vg0C9s/REX6/WIXN37UKpv5ZhRi6A4pjHlpkE34+8/a6c2W1Q692n3hmc+SZG5lKRnaExLUbxtJ1SVT+KaCQ/A==", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz", + "integrity": "sha1-Y7xdy2EzG5K8Bf1SiVPDNGKgb40=", + "dev": true, + "optional": true, + "requires": { + "tweetnacl": "0.14.5" + } + }, + "bindings": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.3.0.tgz", + "integrity": "sha512-DpLh5EzMR2kzvX1KIlVC0VkC3iZtHKTgdtZ0a3pglBZdaQFjt5S9g9xd1lE+YvXyfd6mtCeRnrUfOLYiTMlNSw==", + "dev": true + }, + "bl": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.0.3.tgz", + "integrity": "sha1-/FQhoo/UImA2w7OJGmaiW8ZNIm4=", + "dev": true, + "requires": { + "readable-stream": "2.0.6" + }, + "dependencies": { + "process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=", + "dev": true + }, + "readable-stream": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz", + "integrity": "sha1-j5A0HmilPMySh4jaz80Rs265t44=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "1.0.7", + "string_decoder": "0.10.31", + "util-deprecate": "1.0.2" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + } + } + }, + "bluebird": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz", + "integrity": "sha512-MKiLiV+I1AA596t9w1sQJ8jkiSr5+ZKi0WKrYGUn6d1Fx+Ij4tIj+m2WMQSGczs5jZVxV339chE8iwk6F64wjA==", + "dev": true + }, + "boom": { + "version": "4.3.1", + "resolved": 
"https://registry.npmjs.org/boom/-/boom-4.3.1.tgz", + "integrity": "sha1-T4owBctKfjiJ90kDD9JbluAdLjE=", + "dev": true, + "requires": { + "hoek": "4.2.1" + } + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "1.0.0", + "concat-map": "0.0.1" + } + }, + "buffer-equals": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/buffer-equals/-/buffer-equals-1.0.4.tgz", + "integrity": "sha1-A1O1T9B/2VZBcGca5vZrnPENJ/U=", + "dev": true + }, + "camelcase": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", + "integrity": "sha1-m7UwTS4LVmmLLHWLCKPqqdqlijk=", + "dev": true, + "optional": true + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", + "dev": true + }, + "catharsis": { + "version": "0.8.9", + "resolved": "https://registry.npmjs.org/catharsis/-/catharsis-0.8.9.tgz", + "integrity": "sha1-mMyJDKZS3S7w5ws3klMQ/56Q/Is=", + "dev": true, + "requires": { + "underscore-contrib": "0.3.0" + } + }, + "center-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz", + "integrity": "sha1-qg0yYptu6XIgBBHL1EYckHvCt60=", + "dev": true, + "optional": true, + "requires": { + "align-text": "0.1.4", + "lazy-cache": "1.0.4" + } + }, + "chownr": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.0.1.tgz", + "integrity": "sha1-4qdQQqlVGQi+vSW4Uj1fl2nXkYE=", + "dev": true + }, + "cliui": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-2.1.0.tgz", + "integrity": "sha1-S0dXYP+AJkx2LDoXGQMukcf+oNE=", + "dev": true, + "optional": true, + "requires": { 
+ "center-align": "0.1.3", + "right-align": "0.1.3", + "wordwrap": "0.0.2" + }, + "dependencies": { + "wordwrap": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", + "integrity": "sha1-t5Zpu0LstAn4PVg8rVLKF+qhZD8=", + "dev": true, + "optional": true + } + } + }, + "co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=", + "dev": true + }, + "code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", + "dev": true + }, + "combined-stream": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz", + "integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=", + "dev": true, + "requires": { + "delayed-stream": "1.0.0" + } + }, + "commander": { + "version": "2.14.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.14.1.tgz", + "integrity": "sha512-+YR16o3rK53SmWHU3rEM3tPAh2rwb1yPcQX5irVn7mb0gXbwuCCrnkbV5+PBfETdfg1vui07nM6PCG1zndcjQw==", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.0.tgz", + "integrity": "sha1-CqxmL9Ur54lk1VMvaUeE5wEQrPc=", + "dev": true, + "requires": { + "inherits": "2.0.3", + "readable-stream": "2.3.5", + "typedarray": "0.0.6" + } + }, + "connect": { + "version": "3.6.6", + "resolved": "https://registry.npmjs.org/connect/-/connect-3.6.6.tgz", + "integrity": "sha1-Ce/2xVr3I24TcTWnJXSFi2eG9SQ=", + "dev": true, + "requires": { + "debug": "2.6.9", + "finalhandler": "1.1.0", + "parseurl": "1.3.2", + "utils-merge": "1.0.1" + } + }, + "console-control-strings": { + 
"version": "1.1.0", + "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", + "dev": true + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cryptiles": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-3.1.2.tgz", + "integrity": "sha1-qJ+7Ig9c4l7FboxKqKT9e1sNKf4=", + "dev": true, + "requires": { + "boom": "5.2.0" + }, + "dependencies": { + "boom": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/boom/-/boom-5.2.0.tgz", + "integrity": "sha512-Z5BTk6ZRe4tXXQlkqftmsAUANpXmuwlsF5Oov8ThoMbQRzdGTA1ngYRW160GexgOgjsFOKJz0LYhoNi+2AMBUw==", + "dev": true, + "requires": { + "hoek": "4.2.1" + } + } + } + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "dev": true, + "requires": { + "assert-plus": "1.0.0" + } + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", + "dev": true, + "optional": true + }, + "deep-extend": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.4.2.tgz", + "integrity": "sha1-SLaZwn4zS/ifEIkr5DL25MfTSn8=", + "dev": true + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", + "dev": true + }, + 
"define-properties": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.2.tgz", + "integrity": "sha1-g6c/L+pWmJj7c3GTyPhzyvbUXJQ=", + "dev": true, + "requires": { + "foreach": "2.0.5", + "object-keys": "1.0.11" + }, + "dependencies": { + "object-keys": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.11.tgz", + "integrity": "sha1-xUYBd4rVYPEULODgG8yotW0TQm0=", + "dev": true + } + } + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "dev": true + }, + "delegates": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", + "dev": true + }, + "ecc-jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz", + "integrity": "sha1-D8c6ntXw1Tw4GTOYUj735UN3dQU=", + "dev": true, + "optional": true, + "requires": { + "jsbn": "0.1.1" + } + }, + "ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=", + "dev": true + }, + "encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", + "dev": true + }, + "end-of-stream": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.1.tgz", + "integrity": "sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q==", + "dev": true, + "requires": { + "once": "1.4.0" + } + }, + "es-abstract": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.10.0.tgz", + "integrity": 
"sha512-/uh/DhdqIOSkAWifU+8nG78vlQxdLckUdI/sPgy0VhuXi2qJ7T8czBmqIYtLQVpCIFYafChnsRsB5pyb1JdmCQ==", + "dev": true, + "requires": { + "es-to-primitive": "1.1.1", + "function-bind": "1.1.1", + "has": "1.0.1", + "is-callable": "1.1.3", + "is-regex": "1.0.4" + } + }, + "es-to-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.1.1.tgz", + "integrity": "sha1-RTVSSKiJeQNLZ5Lhm7gfK3l13Q0=", + "dev": true, + "requires": { + "is-callable": "1.1.3", + "is-date-object": "1.0.1", + "is-symbol": "1.0.1" + } + }, + "es6-promise": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.4.tgz", + "integrity": "sha512-/NdNZVJg+uZgtm9eS3O6lrOLYmQag2DjdEXuPaHlZ6RuVqgqaVZfgYCepEIKsLqwdQArOPtC3XzRLqGGfT8KQQ==", + "dev": true + }, + "escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "escodegen": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.8.1.tgz", + "integrity": "sha1-WltTr0aTEQvrsIZ6o0MN07cKEBg=", + "dev": true, + "requires": { + "esprima": "2.7.3", + "estraverse": "1.9.3", + "esutils": "2.0.2", + "optionator": "0.8.2", + "source-map": "0.2.0" + } + }, + "esprima": { + "version": "2.7.3", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-2.7.3.tgz", + "integrity": "sha1-luO3DVd59q1JzQMmc9HDEnZ7pYE=", + "dev": true + }, + "estraverse": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-1.9.3.tgz", + "integrity": "sha1-r2fy3JIlgkFZUJJgkaQAXSnJu0Q=", + "dev": true + }, + "esutils": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz", + "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=", + "dev": true + }, + "expand-template": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-1.1.0.tgz", + "integrity": "sha512-kkjwkMqj0h4w/sb32ERCDxCQkREMCAgS39DscDnSwDsbxnwwM1BTZySdC3Bn1lhY7vL08n9GoO/fVTynjDgRyQ==", + "dev": true + }, + "extend": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.1.tgz", + "integrity": "sha1-p1Xqe8Gt/MWjHOfnYtuq3F5jZEQ=", + "dev": true + }, + "extract-zip": { + "version": "1.6.6", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.6.6.tgz", + "integrity": "sha1-EpDt6NINCHK0Kf0/NRyhKOxe+Fw=", + "dev": true, + "requires": { + "concat-stream": "1.6.0", + "debug": "2.6.9", + "mkdirp": "0.5.0", + "yauzl": "2.4.1" + }, + "dependencies": { + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + }, + "mkdirp": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", + "integrity": "sha1-HXMHam35hs2TROFecfzAWkyavxI=", + "dev": true, + "requires": { + "minimist": "0.0.8" + } + } + } + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", + "dev": true + }, + "fast-deep-equal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz", + "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true + }, + "fd-slicer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.0.1.tgz", + "integrity": "sha1-i1vL2ewyfFBBv5qwI/1nUPEXfmU=", + "dev": true, + "requires": { + "pend": "1.2.0" + } + }, + "finalhandler": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.0.tgz", + "integrity": "sha1-zgtoVbRYU+eRsvzGgARtiCU91/U=", + "dev": true, + "requires": { + "debug": "2.6.9", + "encodeurl": "1.0.2", + "escape-html": "1.0.3", + "on-finished": "2.3.0", + "parseurl": "1.3.2", + "statuses": "1.3.1", + "unpipe": "1.0.0" + } + }, + "for-each": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.2.tgz", + "integrity": "sha1-LEBFC5NI6X8oEyJZO6lnBLmr1NQ=", + "dev": true, + "requires": { + "is-function": "1.0.1" + } + }, + "foreach": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", + "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=", + "dev": true + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", + "dev": true + }, + "form-data": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz", + "integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=", + "dev": true, + "requires": { + "asynckit": "0.4.0", + "combined-stream": "1.0.6", + "mime-types": "2.1.18" + } + }, + "fs-extra": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-1.0.0.tgz", + "integrity": "sha1-zTzl9+fLYUWIP8rjGR6Yd/hYeVA=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "jsonfile": "2.4.0", + "klaw": "1.3.1" + }, + "dependencies": { + "klaw": { + "version": "1.3.1", + "resolved": 
"https://registry.npmjs.org/klaw/-/klaw-1.3.1.tgz", + "integrity": "sha1-QIhDO0azsbolnXh4XY6W9zugJDk=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + } + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "gauge": { + "version": "2.7.4", + "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz", + "integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=", + "dev": true, + "requires": { + "aproba": "1.2.0", + "console-control-strings": "1.1.0", + "has-unicode": "2.0.1", + "object-assign": "4.1.1", + "signal-exit": "3.0.2", + "string-width": "1.0.2", + "strip-ansi": "3.0.1", + "wide-align": "1.1.2" + } + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "dev": true, + "requires": { + "assert-plus": "1.0.0" + } + }, + "github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha1-l/tdlr/eiXMxPyDoKI75oWf6ZM4=", + "dev": true + }, + "glob": { + "version": "5.0.15", + "resolved": "https://registry.npmjs.org/glob/-/glob-5.0.15.tgz", + "integrity": "sha1-G8k2ueAvSmA/zCIuz3Yz0wuLk7E=", + "dev": true, + "requires": { + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "graceful-fs": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", + "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=", + "dev": true + }, + 
"handlebars": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.11.tgz", + "integrity": "sha1-Ywo13+ApS8KB7a5v/F0yn8eYLcw=", + "dev": true, + "requires": { + "async": "1.5.2", + "optimist": "0.6.1", + "source-map": "0.4.4", + "uglify-js": "2.8.29" + }, + "dependencies": { + "source-map": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.4.4.tgz", + "integrity": "sha1-66T12pwNyZneaAMti092FzZSA2s=", + "dev": true, + "requires": { + "amdefine": "1.0.1" + } + } + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", + "dev": true + }, + "har-validator": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.0.3.tgz", + "integrity": "sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0=", + "dev": true, + "requires": { + "ajv": "5.5.2", + "har-schema": "2.0.0" + } + }, + "has": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.1.tgz", + "integrity": "sha1-hGFzP1OLCDfJNh45qauelwTcLyg=", + "dev": true, + "requires": { + "function-bind": "1.1.1" + } + }, + "has-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz", + "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=", + "dev": true + }, + "has-unicode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", + "dev": true + }, + "hasha": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-2.2.0.tgz", + "integrity": "sha1-eNfL/B5tZjA/55g3NlmEUXsvbuE=", + "dev": true, + "requires": { + "is-stream": "1.1.0", + "pinkie-promise": "2.0.1" + } + }, + "hawk": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/hawk/-/hawk-6.0.2.tgz", + "integrity": 
"sha512-miowhl2+U7Qle4vdLqDdPt9m09K6yZhkLDTWGoUiUzrQCn+mHHSmfJgAyGaLRZbPmTqfFFjRV1QWCW0VWUJBbQ==", + "dev": true, + "requires": { + "boom": "4.3.1", + "cryptiles": "3.1.2", + "hoek": "4.2.1", + "sntp": "2.1.0" + } + }, + "hoek": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/hoek/-/hoek-4.2.1.tgz", + "integrity": "sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA==", + "dev": true + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "jsprim": "1.4.1", + "sshpk": "1.13.1" + } + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "1.4.0", + "wrappy": "1.0.2" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + }, + "ini": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", + "dev": true + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "is-callable": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.3.tgz", + "integrity": "sha1-hut1OSgF3cM69xySoO7fdO52BLI=", + "dev": true + }, + "is-date-object": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.1.tgz", + "integrity": 
"sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "dev": true, + "requires": { + "number-is-nan": "1.0.1" + } + }, + "is-function": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-function/-/is-function-1.0.1.tgz", + "integrity": "sha1-Es+5i2W1fdPRk6MSH19uL0N2ArU=", + "dev": true + }, + "is-regex": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.4.tgz", + "integrity": "sha1-VRdIm1RwkbCTDglWVM7SXul+lJE=", + "dev": true, + "requires": { + "has": "1.0.1" + } + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true + }, + "is-symbol": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.1.tgz", + "integrity": "sha1-PMWfAAJRlLarLjjbrmaJJWtmBXI=", + "dev": true + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", + "dev": true + }, + "istanbul": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/istanbul/-/istanbul-0.4.5.tgz", + "integrity": 
"sha1-ZcfXPUxNqE1POsMQuRj7C4Azczs=", + "dev": true, + "requires": { + "abbrev": "1.0.9", + "async": "1.5.2", + "escodegen": "1.8.1", + "esprima": "2.7.3", + "glob": "5.0.15", + "handlebars": "4.0.11", + "js-yaml": "3.11.0", + "mkdirp": "0.5.1", + "nopt": "3.0.6", + "once": "1.4.0", + "resolve": "1.1.7", + "supports-color": "3.2.3", + "which": "1.3.0", + "wordwrap": "1.0.0" + } + }, + "js-yaml": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.11.0.tgz", + "integrity": "sha512-saJstZWv7oNeOyBh3+Dx1qWzhW0+e6/8eDzo7p5rDFqxntSztloLtuKu+Ejhtq82jsilwOIZYsCz+lIjthg1Hw==", + "dev": true, + "requires": { + "argparse": "1.0.10", + "esprima": "4.0.0" + }, + "dependencies": { + "esprima": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.0.tgz", + "integrity": "sha512-oftTcaMu/EGrEIu904mWteKIv8vMuOgGYo7EhVJJN00R/EED9DCua/xxHRdYnKtcECzVg7xOWhflvJMnqcFZjw==", + "dev": true + } + } + }, + "js2xmlparser": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-3.0.0.tgz", + "integrity": "sha1-P7YOqgicVED5MZ9RdgzNB+JJlzM=", + "dev": true, + "requires": { + "xmlcreate": "1.0.2" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", + "dev": true, + "optional": true + }, + "jsdoc": { + "version": "3.5.5", + "resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-3.5.5.tgz", + "integrity": "sha512-6PxB65TAU4WO0Wzyr/4/YhlGovXl0EVYfpKbpSroSj0qBxT4/xod/l40Opkm38dRHRdQgdeY836M0uVnJQG7kg==", + "dev": true, + "requires": { + "babylon": "7.0.0-beta.19", + "bluebird": "3.5.1", + "catharsis": "0.8.9", + "escape-string-regexp": "1.0.5", + "js2xmlparser": "3.0.0", + "klaw": "2.0.0", + "marked": "0.3.17", + "mkdirp": "0.5.1", + "requizzle": "0.2.1", + "strip-json-comments": "2.0.1", + "taffydb": "2.6.2", + "underscore": "1.8.3" + } + }, + "json-schema": { + "version": "0.2.3", + 
"resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", + "dev": true + }, + "json-schema-traverse": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz", + "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=", + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "jsonfile": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz", + "integrity": "sha1-NzaitCi4e72gzIO1P6PWM6NcKug=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "kew": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/kew/-/kew-0.7.0.tgz", + "integrity": "sha1-edk9LTM2PW/dKXCzNdkUGtWR15s=", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "1.1.6" + } + }, + "klaw": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/klaw/-/klaw-2.0.0.tgz", + "integrity": "sha1-WcEo4Nxc5BAgEVEZTuucv4WGUPY=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + }, + "lazy-cache": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz", + "integrity": "sha1-odePw6UEdMuAhF07O24dpJpEbo4=", + "dev": true, + "optional": true + }, + "levn": { + "version": "0.3.0", + "resolved": 
"https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "requires": { + "prelude-ls": "1.1.2", + "type-check": "0.3.2" + } + }, + "longest": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", + "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc=", + "dev": true + }, + "marked": { + "version": "0.3.17", + "resolved": "https://registry.npmjs.org/marked/-/marked-0.3.17.tgz", + "integrity": "sha512-+AKbNsjZl6jFfLPwHhWmGTqE009wTKn3RTmn9K8oUKHrX/abPJjtcRtXpYB/FFrwPJRUA86LX/de3T0knkPCmQ==", + "dev": true + }, + "mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "dev": true + }, + "mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dev": true, + "requires": { + "mime-db": "1.33.0" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } + }, + "minimist": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", + "integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8=", + "dev": true + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "dev": true, + "requires": { + "minimist": "0.0.8" + }, + "dependencies": { + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + 
"integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + } + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "nan": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.8.0.tgz", + "integrity": "sha1-7XFfP+neArV6XmJS2QqWZ14fCFo=", + "dev": true + }, + "node-abi": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-2.3.0.tgz", + "integrity": "sha512-zwm6vU3SsVgw3e9fu48JBaRBCJGIvAgysDsqtf5+vEexFE71bEOtaMWb5zr/zODZNzTPtQlqUUpC79k68Hspow==", + "dev": true, + "requires": { + "semver": "5.5.0" + } + }, + "node-int64": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha1-h6kGXNs1XTGC2PlM4RGIuCXGijs=" + }, + "noop-logger": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/noop-logger/-/noop-logger-0.1.1.tgz", + "integrity": "sha1-lKKxYzxPExdVMAfYlm/Q6EG2pMI=", + "dev": true + }, + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1.0.9" + } + }, + "npmlog": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", + "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", + "dev": true, + "requires": { + "are-we-there-yet": "1.1.4", + "console-control-strings": "1.1.0", + "gauge": "2.7.4", + "set-blocking": "2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", + "dev": true + }, + "oauth-sign": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.8.2.tgz", + "integrity": 
"sha1-Rqarfwrq2N6unsBWV4C31O/rnUM=", + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-inspect": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.5.0.tgz", + "integrity": "sha512-UmOFbHbwvv+XHj7BerrhVq+knjceBdkvU5AriwLMvhv2qi+e7DJzxfBeFpILEjVzCp+xA+W/pIf06RGPWlZNfw==", + "dev": true + }, + "on-finished": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", + "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", + "dev": true, + "requires": { + "ee-first": "1.1.1" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1.0.2" + } + }, + "optimist": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", + "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=", + "dev": true, + "requires": { + "minimist": "0.0.10", + "wordwrap": "0.0.3" + }, + "dependencies": { + "wordwrap": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", + "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc=", + "dev": true + } + } + }, + "optionator": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz", + "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=", + "dev": true, + "requires": { + "deep-is": "0.1.3", + "fast-levenshtein": "2.0.6", + "levn": "0.3.0", + "prelude-ls": "1.1.2", + "type-check": "0.3.2", + "wordwrap": "1.0.0" + } + }, + "os-homedir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=", + "dev": true + }, + "parseurl": { + "version": 
"1.3.2", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz", + "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-parse": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.5.tgz", + "integrity": "sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME=", + "dev": true + }, + "pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", + "dev": true + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", + "dev": true + }, + "phantomjs-prebuilt": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/phantomjs-prebuilt/-/phantomjs-prebuilt-2.1.16.tgz", + "integrity": "sha1-79ISpKOWbTZHaE6ouniFSb4q7+8=", + "dev": true, + "requires": { + "es6-promise": "4.2.4", + "extract-zip": "1.6.6", + "fs-extra": "1.0.0", + "hasha": "2.2.0", + "kew": "0.7.0", + "progress": "1.1.8", + "request": "2.83.0", + "request-progress": "2.0.1", + "which": "1.3.0" + } + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", + "dev": true + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "dev": true, + "requires": { + "pinkie": "2.0.4" + } + }, + "prebuild-install": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-2.3.0.tgz", + "integrity": 
"sha512-gzjq2oHB8oMbzJSsSh9MQ64zrXZGt092/uT4TLZlz2qnrPxpWqp4vYB7LZrDxnlxf5RfbCjkgDI/z0EIVuYzAw==", + "dev": true, + "requires": { + "expand-template": "1.1.0", + "github-from-package": "0.0.0", + "minimist": "1.2.0", + "mkdirp": "0.5.1", + "node-abi": "2.3.0", + "noop-logger": "0.1.1", + "npmlog": "4.1.2", + "os-homedir": "1.0.2", + "pump": "1.0.3", + "rc": "1.2.5", + "simple-get": "1.4.3", + "tar-fs": "1.16.0", + "tunnel-agent": "0.6.0", + "xtend": "4.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + }, + "xtend": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", + "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=", + "dev": true + } + } + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", + "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==", + "dev": true + }, + "progress": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/progress/-/progress-1.1.8.tgz", + "integrity": "sha1-4mDHj2Fhzdmw5WzD4Khd4Xx6V74=", + "dev": true + }, + "pump": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/pump/-/pump-1.0.3.tgz", + "integrity": "sha512-8k0JupWme55+9tCVE+FS5ULT3K6AbgqrGa58lTT49RpyfwwcGedHqaC5LlQNdEAumn/wFsu6aPwkuPMioy8kqw==", + "dev": true, + "requires": { + "end-of-stream": "1.4.1", + "once": "1.4.0" + } + }, + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + }, + "q": { + 
"version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=" + }, + "qs": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", + "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==", + "dev": true + }, + "rc": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.5.tgz", + "integrity": "sha1-J1zWh/bjs2zHVrqibf7oCnkDAf0=", + "dev": true, + "requires": { + "deep-extend": "0.4.2", + "ini": "1.3.5", + "minimist": "1.2.0", + "strip-json-comments": "2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + } + } + }, + "readable-stream": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.5.tgz", + "integrity": "sha512-tK0yDhrkygt/knjowCUiWP9YdV7c5R+8cR0r/kt9ZhBU906Fs6RpQJCEilamRJj1Nx2rWI6LkW9gKqjTkshhEw==", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "2.0.0", + "safe-buffer": "5.1.1", + "string_decoder": "1.0.3", + "util-deprecate": "1.0.2" + } + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "request": { + "version": "2.83.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.83.0.tgz", + "integrity": "sha512-lR3gD69osqm6EYLk9wB/G1W/laGWjzH90t1vEa2xuxHD5KUrSzp9pUSfTm+YC5Nxt2T8nMPEvKlhbQayU7bgFw==", + "dev": true, + "requires": { + "aws-sign2": "0.7.0", + "aws4": "1.6.0", + "caseless": "0.12.0", + "combined-stream": "1.0.6", + "extend": "3.0.1", + "forever-agent": "0.6.1", + "form-data": "2.3.2", + "har-validator": "5.0.3", + "hawk": 
"6.0.2", + "http-signature": "1.2.0", + "is-typedarray": "1.0.0", + "isstream": "0.1.2", + "json-stringify-safe": "5.0.1", + "mime-types": "2.1.18", + "oauth-sign": "0.8.2", + "performance-now": "2.1.0", + "qs": "6.5.1", + "safe-buffer": "5.1.1", + "stringstream": "0.0.5", + "tough-cookie": "2.3.4", + "tunnel-agent": "0.6.0", + "uuid": "3.2.1" + } + }, + "request-progress": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-2.0.1.tgz", + "integrity": "sha1-XTa7V5YcZzqlt4jbyBQf3yO0Tgg=", + "dev": true, + "requires": { + "throttleit": "1.0.0" + } + }, + "requizzle": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.1.tgz", + "integrity": "sha1-aUPDUwxNmn5G8c3dUcFY/GcM294=", + "dev": true, + "requires": { + "underscore": "1.6.0" + }, + "dependencies": { + "underscore": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.6.0.tgz", + "integrity": "sha1-izixDKze9jM3uLJOT/htRa6lKag=", + "dev": true + } + } + }, + "resolve": { + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.1.7.tgz", + "integrity": "sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=", + "dev": true + }, + "resumer": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/resumer/-/resumer-0.0.0.tgz", + "integrity": "sha1-8ej0YeQGS6Oegq883CqMiT0HZ1k=", + "dev": true, + "requires": { + "through": "2.3.8" + } + }, + "right-align": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/right-align/-/right-align-0.1.3.tgz", + "integrity": "sha1-YTObci/mo1FWiSENJOFMlhSGE+8=", + "dev": true, + "optional": true, + "requires": { + "align-text": "0.1.4" + } + }, + "safe-buffer": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", + "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==", + "dev": true + }, + "semver": { + "version": "5.5.0", 
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", + "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==", + "dev": true + }, + "set-blocking": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "simple-get": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-1.4.3.tgz", + "integrity": "sha1-6XVe2kB+ltpAxeUVjJ6jezO+y+s=", + "dev": true, + "requires": { + "once": "1.4.0", + "unzip-response": "1.0.2", + "xtend": "4.0.1" + }, + "dependencies": { + "xtend": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", + "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=", + "dev": true + } + } + }, + "sntp": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/sntp/-/sntp-2.1.0.tgz", + "integrity": "sha512-FL1b58BDrqS3A11lJ0zEdnJ3UOKqVxawAkF3k7F0CVN7VQ34aZrV+G8BZ1WC9ZL7NyrwsW0oviwsWDgRuVYtJg==", + "dev": true, + "requires": { + "hoek": "4.2.1" + } + }, + "source-map": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.2.0.tgz", + "integrity": "sha1-2rc/vPwrqBm03gO9b26qSBZLP50=", + "dev": true, + "optional": true, + "requires": { + "amdefine": "1.0.1" + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "sshpk": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.13.1.tgz", + "integrity": "sha1-US322mKHFEMW3EwY/hzx2UBzm+M=", + "dev": true, + "requires": { + "asn1": "0.2.3", + 
"assert-plus": "1.0.0", + "bcrypt-pbkdf": "1.0.1", + "dashdash": "1.14.1", + "ecc-jsbn": "0.1.1", + "getpass": "0.1.7", + "jsbn": "0.1.1", + "tweetnacl": "0.14.5" + } + }, + "statuses": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.3.1.tgz", + "integrity": "sha1-+vUbnrdKrvOzrPStX2Gr8ky3uT4=", + "dev": true + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "dev": true, + "requires": { + "code-point-at": "1.1.0", + "is-fullwidth-code-point": "1.0.0", + "strip-ansi": "3.0.1" + } + }, + "string.prototype.trim": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.1.2.tgz", + "integrity": "sha1-0E3iyJ4Tf019IG8Ia17S+ua+jOo=", + "dev": true, + "requires": { + "define-properties": "1.1.2", + "es-abstract": "1.10.0", + "function-bind": "1.1.1" + } + }, + "string_decoder": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.0.3.tgz", + "integrity": "sha512-4AH6Z5fzNNBcH+6XDMfA/BTt87skxqJlO0lAh3Dker5zThcAxG6mKz+iGu308UKoPPQ8Dcqx/4JhujzltRa+hQ==", + "dev": true, + "requires": { + "safe-buffer": "5.1.1" + } + }, + "stringstream": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.5.tgz", + "integrity": "sha1-TkhM1N5aC7vuGORjB3EKioFiGHg=", + "dev": true + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "requires": { + "ansi-regex": "2.1.1" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "dev": true + }, + "supports-color": { + "version": "3.2.3", + 
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.2.3.tgz", + "integrity": "sha1-ZawFBLOVQXHYpklGsq48u4pfVPY=", + "dev": true, + "requires": { + "has-flag": "1.0.0" + } + }, + "taffydb": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/taffydb/-/taffydb-2.6.2.tgz", + "integrity": "sha1-fLy2S1oUG2ou/CxdLGe04VCyomg=", + "dev": true + }, + "tape": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/tape/-/tape-4.9.0.tgz", + "integrity": "sha512-j0jO9BiScfqtPBb9QmPLL0qvxXMz98xjkMb7x8lKipFlJZwNJkqkWPou+NU4V6T9RnVh1kuSthLE8gLrN8bBfw==", + "dev": true, + "requires": { + "deep-equal": "1.0.1", + "defined": "1.0.0", + "for-each": "0.3.2", + "function-bind": "1.1.1", + "glob": "7.1.2", + "has": "1.0.1", + "inherits": "2.0.3", + "minimist": "1.2.0", + "object-inspect": "1.5.0", + "resolve": "1.5.0", + "resumer": "0.0.0", + "string.prototype.trim": "1.1.2", + "through": "2.3.8" + }, + "dependencies": { + "deep-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz", + "integrity": "sha1-9dJgKStmDghO/0zbyfCK0yR0SLU=", + "dev": true + }, + "defined": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/defined/-/defined-1.0.0.tgz", + "integrity": "sha1-yY2bzvdWdBiOEQlpFRGZ45sfppM=", + "dev": true + }, + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dev": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + }, + "resolve": { + "version": "1.5.0", + "resolved": 
"https://registry.npmjs.org/resolve/-/resolve-1.5.0.tgz", + "integrity": "sha512-hgoSGrc3pjzAPHNBg+KnFcK2HwlHTs/YrAGUr6qgTVUZmXv1UEXXl0bZNBKMA9fud6lRYFdPGz0xXxycPzmmiw==", + "dev": true, + "requires": { + "path-parse": "1.0.5" + } + } + } + }, + "tar-fs": { + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-1.16.0.tgz", + "integrity": "sha512-I9rb6v7mjWLtOfCau9eH5L7sLJyU2BnxtEZRQ5Mt+eRKmf1F0ohXmT/Jc3fr52kDvjJ/HV5MH3soQfPL5bQ0Yg==", + "dev": true, + "requires": { + "chownr": "1.0.1", + "mkdirp": "0.5.1", + "pump": "1.0.3", + "tar-stream": "1.5.5" + } + }, + "tar-stream": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.5.5.tgz", + "integrity": "sha512-mQdgLPc/Vjfr3VWqWbfxW8yQNiJCbAZ+Gf6GDu1Cy0bdb33ofyiNGBtAY96jHFhDuivCwgW1H9DgTON+INiXgg==", + "dev": true, + "requires": { + "bl": "1.0.3", + "end-of-stream": "1.4.1", + "readable-stream": "2.3.5", + "xtend": "4.0.1" + }, + "dependencies": { + "xtend": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", + "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=", + "dev": true + } + } + }, + "throttleit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz", + "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=", + "dev": true + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "tough-cookie": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.4.tgz", + "integrity": "sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA==", + "dev": true, + "requires": { + "punycode": "1.4.1" + } + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", 
+ "dev": true, + "requires": { + "safe-buffer": "5.1.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", + "dev": true, + "optional": true + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "1.1.2" + } + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": true + }, + "uglify-js": { + "version": "2.8.29", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.8.29.tgz", + "integrity": "sha1-KcVzMUgFe7Th913zW3qcty5qWd0=", + "dev": true, + "optional": true, + "requires": { + "source-map": "0.5.7", + "uglify-to-browserify": "1.0.2", + "yargs": "3.10.0" + }, + "dependencies": { + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=", + "dev": true, + "optional": true + } + } + }, + "uglify-to-browserify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz", + "integrity": "sha1-bgkk1r2mta/jSeOabWMoUKD4grc=", + "dev": true, + "optional": true + }, + "underscore": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", + "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=", + "dev": true + }, + "underscore-contrib": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/underscore-contrib/-/underscore-contrib-0.3.0.tgz", + "integrity": "sha1-ZltmwkeD+PorGMn4y7Dix9SMJsc=", + "dev": true, + "requires": { + "underscore": "1.6.0" + }, + "dependencies": { + "underscore": { + "version": "1.6.0", + "resolved": 
"https://registry.npmjs.org/underscore/-/underscore-1.6.0.tgz", + "integrity": "sha1-izixDKze9jM3uLJOT/htRa6lKag=", + "dev": true + } + } + }, + "unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", + "dev": true + }, + "unzip-response": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/unzip-response/-/unzip-response-1.0.2.tgz", + "integrity": "sha1-uYTwh3/AqJwsdzzB73tbIytbBv4=", + "dev": true + }, + "utf-8-validate": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/utf-8-validate/-/utf-8-validate-4.0.0.tgz", + "integrity": "sha512-JS/c6nR/qauqSdvTksgDO1142kYddTXz42y5X/he188B/kgcFLLB4l9CfZd+hGic/ORgsL+pPfwr9lYsL/80Fw==", + "dev": true, + "requires": { + "bindings": "1.3.0", + "nan": "2.8.0", + "prebuild-install": "2.3.0" + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", + "dev": true + }, + "uuid": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.2.1.tgz", + "integrity": "sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA==", + "dev": true + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "1.3.0" + } + }, + "which": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.0.tgz", + "integrity": "sha512-xcJpopdamTuY5duC/KnTTNBraPK54YwpenP4lzxU8H91GudWpFv38u0CKjclE1Wi2EH2EDz5LRcHcKbCIzqGyg==", + 
"dev": true, + "requires": { + "isexe": "2.0.0" + } + }, + "wide-align": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.2.tgz", + "integrity": "sha512-ijDLlyQ7s6x1JgCLur53osjm/UXUYD9+0PbYKrBsYisYXzCxN+HC3mYDNy/dWdmf3AwqwU3CXwDCvsNgGK1S0w==", + "dev": true, + "requires": { + "string-width": "1.0.2" + } + }, + "window-size": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.0.tgz", + "integrity": "sha1-VDjNLqk7IC76Ohn+iIeu58lPnJ0=", + "dev": true, + "optional": true + }, + "wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "dev": true + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "ws": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-5.0.0.tgz", + "integrity": "sha512-XXG4S0b771C68AeTHebBsJJBZMguxj7Em+D657RViuj6ppRd3tfuOhIK8eGwZGNb76C8MjQfCTfH2NN50rJN4w==", + "requires": { + "async-limiter": "1.0.0" + } + }, + "xmlcreate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-1.0.2.tgz", + "integrity": "sha1-+mv3YqYKQT+z3Y9LA8WyaSONMI8=", + "dev": true + }, + "yargs": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.10.0.tgz", + "integrity": "sha1-9+572FfdfB0tOMDnTvvWgdFDH9E=", + "dev": true, + "optional": true, + "requires": { + "camelcase": "1.2.1", + "cliui": "2.1.0", + "decamelize": "1.2.0", + "window-size": "0.1.0" + } + }, + "yauzl": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.4.1.tgz", + "integrity": "sha1-lSj0QtqxsihOWLQ3m7GU4i4MQAU=", + "dev": true, + "requires": { + "fd-slicer": "1.0.1" + } + } + } +} diff --git a/vendor/git.apache.org/thrift.git/package.json 
b/vendor/git.apache.org/thrift.git/package.json index 24851b280..b03fc5758 100644 --- a/vendor/git.apache.org/thrift.git/package.json +++ b/vendor/git.apache.org/thrift.git/package.json @@ -6,7 +6,7 @@ "type": "git", "url": "https://git-wip-us.apache.org/repos/asf/thrift.git" }, - "version": "0.11.0", + "version": "1.0.0-dev", "author": { "name": "Apache Thrift Developers", "email": "dev@thrift.apache.org", @@ -37,19 +37,17 @@ "dependencies": { "node-int64": "^0.4.0", "q": "^1.5.0", - "ws": ">= 2.2.3" + "ws": "^5.0.0" }, "devDependencies": { "buffer-equals": "^1.0.4", - "commander": "^2.11.0", - "connect": "^3.6.4", + "commander": "^2.14.1", + "connect": "^3.6.6", "istanbul": "^0.4.5", - "jsdoc": ">=3.5.5", - "minimatch": "^3.0.4", - "phantomjs-prebuilt": "^2.1.7", - "run-browser": "^2.0.2", - "tape": "^4.8.0", - "utf-8-validate": "^3.0.0" + "jsdoc": "^3.5.5", + "phantomjs-prebuilt": "^2.1.16", + "tape": "^4.9.0", + "utf-8-validate": "^4.0.0" }, "scripts": { "cover": "lib/nodejs/test/testAll.sh COVER", diff --git a/vendor/git.apache.org/thrift.git/phpcs.xml.dist b/vendor/git.apache.org/thrift.git/phpcs.xml.dist new file mode 100644 index 000000000..180388aea --- /dev/null +++ b/vendor/git.apache.org/thrift.git/phpcs.xml.dist @@ -0,0 +1,25 @@ + + + The coding standard for thrift. + + lib/php/lib + lib/php/test + lib/php/test/packages/* + + + + + + + + + + + + lib/php/test/* + + + lib/php/test/* + + + diff --git a/vendor/git.apache.org/thrift.git/sonar-project.properties b/vendor/git.apache.org/thrift.git/sonar-project.properties index 6bc9989aa..aebeff0c0 100755 --- a/vendor/git.apache.org/thrift.git/sonar-project.properties +++ b/vendor/git.apache.org/thrift.git/sonar-project.properties @@ -16,7 +16,7 @@ development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between all major languages. 
# Apache Thrift Version -sonar.projectVersion=0.11.0 +sonar.projectVersion=1.0.0-dev # use this to set another version string # $ sonar-runner -D sonar.projectVersion=`git rev-parse HEAD` # set projectDate in combination with projectVersion for imports of old releases @@ -54,15 +54,15 @@ module1.sonar.projectName=Apache Thrift - Java Library module1.sonar.projectBaseDir=lib/java module1.sonar.sources=src module1.sonar.tests=test -module1.sonar.binaries=build/libthrift-0.11.0.jar -module1.sonar.libraries=build/lib/*.jar +module1.sonar.binaries=build/libs/libthrift-1.0.0.jar +module1.sonar.libraries=build/deps/*.jar module1.sonar.language=java module2.sonar.projectName=Apache Thrift - Java Tutorial module2.sonar.projectBaseDir=. module2.sonar.sources=tutorial/java/src, tutorial/java/gen-java module2.sonar.binaries=tutorial/java/tutorial.jar -module2.sonar.libraries=lib/java/build/lib/*.jar,lib/java/build/libthrift-0.11.0.jar +module2.sonar.libraries=lib/java/build/deps/*.jar,lib/java/build/libs/libthrift-1.0.0.jar module2.sonar.language=java module3.sonar.projectName=Apache Thrift - JavaScript Library diff --git a/vendor/git.apache.org/thrift.git/test/DoubleConstantsTest.thrift b/vendor/git.apache.org/thrift.git/test/DoubleConstantsTest.thrift new file mode 100644 index 000000000..c9212abbb --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/DoubleConstantsTest.thrift @@ -0,0 +1,17 @@ +namespace java thrift.test +namespace cpp thrift.test + +// more tests on double constants (precision and type checks) +const double DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST = 1 +const double DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST = -100 +const double DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST = 9223372036854775807 +const double DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST = -9223372036854775807 +const double DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST = 3.14159265359 +const double DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST = 1000000.1 +const double 
DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST = -1000000.1 +const double DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST = 1.7e+308 +const double DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST = 9223372036854775816.43 +const double DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST = -1.7e+308 +const double DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST = -9223372036854775816.43 + +const list DOUBLE_LIST_TEST = [1,-100,100,9223372036854775807,-9223372036854775807,3.14159265359,1000000.1,-1000000.1,1.7e+308,-1.7e+308,9223372036854775816.43,-9223372036854775816.43] diff --git a/vendor/git.apache.org/thrift.git/test/Makefile.am b/vendor/git.apache.org/thrift.git/test/Makefile.am index 335bae63a..7267066b7 100755 --- a/vendor/git.apache.org/thrift.git/test/Makefile.am +++ b/vendor/git.apache.org/thrift.git/test/Makefile.am @@ -25,6 +25,11 @@ SUBDIRS += c_glib PRECROSS_TARGET += precross-c_glib endif +if WITH_CL +SUBDIRS += cl +PRECROSS_TARGET += precross-cl +endif + if WITH_MONO SUBDIRS += csharp PRECROSS_TARGET += precross-csharp @@ -110,6 +115,7 @@ EXTRA_DIST = \ crossrunner \ keys \ c_glib \ + cl \ cpp \ dart \ erl \ @@ -128,6 +134,7 @@ EXTRA_DIST = \ BrokenConstants.thrift \ ConstantsDemo.thrift \ DebugProtoTest.thrift \ + DoubleConstantsTest.thrift \ DenseLinkingTest.thrift \ DocTest.thrift \ EnumTest.thrift \ diff --git a/vendor/git.apache.org/thrift.git/test/ThriftTest.thrift b/vendor/git.apache.org/thrift.git/test/ThriftTest.thrift index 24dcbb94e..bff4e5221 100644 --- a/vendor/git.apache.org/thrift.git/test/ThriftTest.thrift +++ b/vendor/git.apache.org/thrift.git/test/ThriftTest.thrift @@ -37,7 +37,7 @@ namespace delphi Thrift.Test namespace cocoa ThriftTest namespace lua ThriftTest namespace xsd test (uri = 'http://thrift.apache.org/ns/ThriftTest') -namespace netcore ThriftAsync.Test +namespace netcore ThriftTest // Presence of namespaces and sub-namespaces for which there is // no generator should compile with warnings only diff --git 
a/vendor/git.apache.org/thrift.git/test/cl/Makefile.am b/vendor/git.apache.org/thrift.git/test/cl/Makefile.am new file mode 100755 index 000000000..b5e72bcbb --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/cl/Makefile.am @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +THRIFT = $(top_builddir)/compiler/cpp/thrift + +stubs: ../ThriftTest.thrift + $(THRIFT) --gen cl ../ThriftTest.thrift + +TestServer: make-test-server.lisp + $(SBCL) --script make-test-server.lisp + +TestClient: make-test-client.lisp + $(SBCL) --script make-test-client.lisp + +precross: stubs TestServer TestClient + +clean-local: + $(RM) -r gen-cl + $(RM) TestServer + $(RM) TestClient + +EXTRA_DIST = \ + implementation.lisp \ + make-test-client.lisp \ + make-test-server.lisp \ + tests.lisp diff --git a/vendor/git.apache.org/thrift.git/test/cl/implementation.lisp b/vendor/git.apache.org/thrift.git/test/cl/implementation.lisp new file mode 100644 index 000000000..0caf7beb5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/cl/implementation.lisp @@ -0,0 +1,136 @@ +(in-package #:thrift.test-implementation) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. 
+ +(defun thrift.test.thrift-test-implementation:test-void () + (format t "testVoid()~%")) + +(defun thrift.test.thrift-test-implementation:test-string (thing) + (format t "testString(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-bool (thing) + (format t "testBool(~a)~%" (if thing "true" "false")) + thing) + +(defun thrift.test.thrift-test-implementation:test-byte (thing) + (format t "testByte(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-i32 (thing) + (format t "testI32(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-i64 (thing) + (format t "testI64(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-double (thing) + (format t "testDouble(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-binary (thing) + (format t "testBinary(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-struct (thing) + (format t "testStruct(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-nest (thing) + (format t "testNest(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-map (thing) + (format t "testMap(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-string-map (thing) + (format t "testStringMap(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-set (thing) + (format t "testSet(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-list (thing) + (format t "testList(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-enum (thing) + (format t "testEnum(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-typedef (thing) + (format t "testTypedef(~a)~%" thing) + thing) + +(defun thrift.test.thrift-test-implementation:test-map-map (hello) + (format t "testMapMap(~a)~%" hello) + '((-4 . ((-4 . -4) (-3 . -3) (-2 . -2) (-1 . -1))) (4 . ((1 . 1) (2 . 
2) (3 . 3) (4 . 4))))) + +(defun thrift.test.thrift-test-implementation:test-insanity (argument) + (let ((result `((1 . ((2 . ,argument) (3 . ,argument))) + (2 . ((6 . ,(thrift.test::make-insanity :user-map nil :xtructs nil))))))) + (format t "~a~%" result) + result)) + +(defun thrift.test.thrift-test-implementation:test-multi (arg0 arg1 arg2 arg3 arg4 arg5) + (declare (ignorable arg3 arg4 arg5)) + (format t "testMulti()~%") + (thrift.test:make-xtruct :string-thing "Hello2" + :byte-thing arg0 + :i32-thing arg1 + :i64-thing arg2)) + +(defun thrift.test.thrift-test-implementation:test-exception (arg) + (format t "testException(~a)~%" arg) + (cond + ((string= arg "Xception") (error 'thrift.test:xception + :error-code 1001 + :message arg)) + ((string= arg "TException") (error 'thrift.test:xception + :error-code 0 + :message "Stuff!")))) + +(defun thrift.test.thrift-test-implementation:test-multi-exception (arg0 arg1) + (format t "testMultiException(~a, ~a)~%" arg0 arg1) + (cond + ((string= arg0 "Xception") (error 'thrift.test:xception + :error-code 1001 + :message "This is an Xception")) + ((string= arg0 "Xception2") (error 'thrift.test:xception2 + :error-code 2002 + :struct-thing (thrift.test:make-xtruct :string-thing "This is an Xception2" + :byte-thing 0 + :i32-thing 0 + :i64-thing 0)))) + (thrift.test:make-xtruct :string-thing arg1 + :byte-thing 0 + :i32-thing 0 + :i64-thing 0)) + +(defun thrift.test.thrift-test-implementation:test-oneway (seconds) + (format t "testOneway(~a): Sleeping...~%" seconds) + (sleep seconds) + (format t "testOneway(~a): done sleeping!~%" seconds)) + +;;; Removed from the IDL definition. 
+#+(or) +(defun thrift.test.second-service-implementation:blah-blah () + (format t "blahBlah()~%")) + +(defun thrift.test.second-service-implementation:secondtest-string (thing) + (format t "secondtestString(~a)~%" thing) + (concatenate 'string "testString(\"" thing "\")")) + diff --git a/vendor/git.apache.org/thrift.git/test/cl/make-test-client.lisp b/vendor/git.apache.org/thrift.git/test/cl/make-test-client.lisp new file mode 100644 index 000000000..509669dd8 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/cl/make-test-client.lisp @@ -0,0 +1,93 @@ +(in-package #:cl-user) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. 
+ +#+(or) (when (not (boundp 'sb-impl::default-external-format) + (setf sb-impl::default-external-format :UTF-8))) + +(require "asdf") +(load (merge-pathnames "../../lib/cl/load-locally.lisp" *load-truename*)) +(asdf:load-system :net.didierverna.clon) +(asdf:load-system :fiasco) +(asdf:load-asd (merge-pathnames "gen-cl/ThriftTest/thrift-gen-ThriftTest.asd" *load-truename*)) +(asdf:load-system :thrift-gen-thrifttest) + +(net.didierverna.clon:nickname-package) + +(defpackage #:thrift-cross + (:use #:common-lisp #:fiasco) + (:export #:cross-test)) + +(in-package #:thrift-cross) + +(defparameter *prot* nil) + +(load (merge-pathnames "tests.lisp" *load-truename*) :external-format :UTF-8) + +(clon:defsynopsis () + (text :contents "The Common Lisp client for Thrift's cross-language test suite.") + (group (:header "Allowed options:") + (flag :short-name "h" :long-name "help" + :description "Print this help and exit.") + (stropt :long-name "host" + :description "The host to connect to." + :default-value "localhost" + :argument-name "ARG") + (stropt :long-name "port" + :description "Number of the port to listen for connections on." + :default-value "9090" + :argument-name "ARG" + :argument-type :optional) + (stropt :long-name "transport" + :description "Transport: transport to use (\"buffered\", \"framed\")" + :default-value "buffered" + :argument-name "ARG") + (stropt :long-name "protocol" + :description "Protocol: protocol to use (\"binary\", \"multi\")" + :default-value "binary" + :argument-name "ARG"))) + +(defun main () + "Entry point for our standalone application." 
+ (clon:make-context) + (when (clon:getopt :short-name "h") + (clon:help) + (clon:exit)) + (let ((port "9090") + (host "localhost") + (framed nil) + (multiplexed nil)) + (clon:do-cmdline-options (option name value source) + (print (list option name value source)) + (if (string= name "host") + (setf host value)) + (if (string= name "port") + (setf port value)) + (if (string= name "transport") + (cond ((string= value "buffered") (setf framed nil)) + ((string= value "framed") (setf framed t)) + (t (error "Unsupported transport.")))) + (if (string= name "protocol") + (cond ((string= value "binary") (setf multiplexed nil)) + ((string= value "multi") (setf multiplexed t)) + (t (error "Unsupported protocol."))))) + (terpri) + (setf *prot* (thrift.implementation::client (puri:parse-uri + (concatenate 'string "thrift://" host ":" port)) + :framed framed + :multiplexed multiplexed)) + (let ((result (cross-test :multiplexed multiplexed))) + (thrift.implementation::close *prot*) + (clon:exit result)))) + +(clon:dump "TestClient" main) diff --git a/vendor/git.apache.org/thrift.git/test/cl/make-test-server.lisp b/vendor/git.apache.org/thrift.git/test/cl/make-test-server.lisp new file mode 100644 index 000000000..293c87928 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/cl/make-test-server.lisp @@ -0,0 +1,80 @@ +(in-package #:cl-user) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. 
+ +(require "asdf") +(load (merge-pathnames "../../lib/cl/load-locally.lisp" *load-truename*)) +(asdf:load-system :net.didierverna.clon) +(asdf:load-asd (merge-pathnames "gen-cl/ThriftTest/thrift-gen-ThriftTest.asd" *load-truename*)) +(asdf:load-system :thrift-gen-thrifttest) +(load (merge-pathnames "implementation.lisp" *load-truename*)) + +(net.didierverna.clon:nickname-package) + +(clon:defsynopsis () + (text :contents "The Common Lisp server for Thrift's cross-language test suite.") + (group (:header "Allowed options:") + (flag :short-name "h" :long-name "help" + :description "Print this help and exit.") + (stropt :long-name "port" + :description "Number of the port to listen for connections on." + :default-value "9090" + :argument-name "ARG" + :argument-type :optional) + (stropt :long-name "server-type" + :description "The type of server, currently only \"simple\" is available." + :default-value "simple" + :argument-name "ARG") + (stropt :long-name "transport" + :description "Transport: transport to use (\"buffered\" or \"framed\")" + :default-value "buffered" + :argument-name "ARG") + (stropt :long-name "protocol" + :description "Protocol: protocol to use (\"binary\" or \"multi\")" + :default-value "binary" + :argument-name "ARG"))) + +(defun main () + "Entry point for our standalone application." 
+ (clon:make-context) + (when (clon:getopt :short-name "h") + (clon:help) + (clon:exit)) + (let ((port "9090") + (framed nil) + (multiplexed nil)) + (clon:do-cmdline-options (option name value source) + (print (list option name value source)) + (if (string= name "port") + (setf port value)) + (if (string= name "transport") + (cond ((string= value "buffered") (setf framed nil)) + ((string= value "framed") (setf framed t)) + (t (error "Unsupported transport.")))) + (if (string= name "protocol") + (cond ((string= value "binary") (setf multiplexed nil)) + ((string= value "multi") (setf multiplexed t)) + (t (error "Unsupported protocol."))))) + (terpri) + (let ((services (if multiplexed + (list thrift.test:thrift-test thrift.test:second-service) + thrift.test:thrift-test))) + (thrift:serve (puri:parse-uri (concatenate 'string + "thrift://127.0.0.1:" + port)) + services + :framed framed + :multiplexed multiplexed))) + (clon:exit)) + +(clon:dump "TestServer" main) diff --git a/vendor/git.apache.org/thrift.git/test/cl/tests.lisp b/vendor/git.apache.org/thrift.git/test/cl/tests.lisp new file mode 100644 index 000000000..c5035fd98 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/cl/tests.lisp @@ -0,0 +1,240 @@ +(in-package #:thrift-cross) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. 
+ +;;;; The tests here only make sense in the context of a TestServer +;;;; running and the dynamic variable thrift-cross::*prot* +;;;; being set with a client connection to the TestServer. Normally, +;;;; this is handled in make-test-client.lisp. + + +;;; Standard Thrift cross-test error codes +(defparameter *test_basetypes* 1) +(defparameter *test_structs* 2) +(defparameter *test_containers* 4) +(defparameter *test_exceptions* 8) +(defparameter *test_unknown* 64) +(defparameter *test_timeout* 128) + +(defun cross-test (&key (multiplexed nil)) + "The main cross-test runner." + (let ((result nil)) + (handler-case + (progn + (unless (run-package-tests :package :base-types) + (pushnew *test_basetypes* result)) + (unless (run-package-tests :package :structs) + (pushnew *test_structs* result)) + (unless (run-package-tests :package :containers) + (pushnew *test_containers* result)) + (unless (run-package-tests :package :exceptions) + (pushnew *test_exceptions* result)) + (unless (run-package-tests :package :misc) + (pushnew *test_unknown* result)) + + ;; It doesn't seem like anyone actually uses + ;; the second test service when testing multiplexing, + ;; so this would fail against servers in other + ;; languages. For now, anyway. 
+ #+(or) + (when multiplexed + (unless (run-package-tests :package :multiplex) + (pushnew *test_unknown* result)))) + (error (e) (pushnew *test_unknown* result))) + (apply #'+ result))) + +(fiasco:define-test-package #:base-types) + +(in-package #:base-types) + +(defconstant *lang-string* "Afrikaans, Alemannisch, Aragonés, العربية, مصرى, Asturianu, Aymar aru, Azərbaycan, Башҡорт, Boarisch, Žemaitėška, Беларуская, Беларуская (тарашкевіца), Български, Bamanankan, বাংলা, Brezhoneg, Bosanski, Català, Mìng-dĕ̤ng-ngṳ̄, Нохчийн, Cebuano, ᏣᎳᎩ, Česky, Словѣ́ньскъ / ⰔⰎⰑⰂⰡⰐⰠⰔⰍⰟ, Чӑвашла, Cymraeg, Dansk, Zazaki, ދިވެހިބަސް, Ελληνικά, Emiliàn e rumagnòl, English, Esperanto, Español, Eesti, Euskara, فارسی, Suomi, Võro, Føroyskt, Français, Arpetan, Furlan, Frysk, Gaeilge, 贛語, Gàidhlig, Galego, Avañe'ẽ, ગુજરાતી, Gaelg, עברית, हिन्दी, Fiji Hindi, Hrvatski, Kreyòl ayisyen, Magyar, Հայերեն, Interlingua, Bahasa Indonesia, Ilokano, Ido, Íslenska, Italiano, 日本語, Lojban, Basa Jawa, ქართული, Kongo, Kalaallisut, ಕನ್ನಡ, 한국어, Къарачай-Малкъар, Ripoarisch, Kurdî, Коми, Kernewek, Кыргызча, Latina, Ladino, Lëtzebuergesch, Limburgs, Lingála, ລາວ, Lietuvių, Latviešu, Basa Banyumasan, Malagasy, Македонски, മലയാളം, मराठी, مازِرونی, Bahasa Melayu, Nnapulitano, Nedersaksisch, नेपाल भाषा, Nederlands, ‪Norsk (nynorsk)‬, ‪Norsk (bokmål)‬, Nouormand, Diné bizaad, Occitan, Иронау, Papiamentu, Deitsch, Polski, پنجابی, پښتو, Norfuk / Pitkern, Português, Runa Simi, Rumantsch, Romani, Română, Русский, Саха тыла, Sardu, Sicilianu, Scots, Sámegiella, Simple English, Slovenčina, Slovenščina, Српски / Srpski, Seeltersk, Svenska, Kiswahili, தமிழ், తెలుగు, Тоҷикӣ, ไทย, Türkmençe, Tagalog, Türkçe, Татарча/Tatarça, Українська, اردو, Tiếng Việt, Volapük, Walon, Winaray, 吴语, isiXhosa, ייִדיש, Yorùbá, Zeêuws, 中文, Bân-lâm-gú, 粵語") + +(defparameter *trick-string* (format nil "quote: \" backslash: \\ newline: ~% backspace: ~C ~ + tab: ~T junk: !@#$%&()(&%$#{}{}<><><" #\backspace)) + +(defconstant *binary-sequence* #(128 
129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127)) + +(deftest void-test () + (is (null (thrift.test.thrift-test:test-void thrift-cross::*prot*)))) + +(deftest boolean-test () + (is (thrift.test.thrift-test:test-bool thrift-cross::*prot* t)) + (is (not (thrift.test.thrift-test:test-bool thrift-cross::*prot* nil)))) + +(deftest integer-test () + (is (= (thrift.test.thrift-test:test-byte thrift-cross::*prot* 127) 127)) + (is (= (thrift.test.thrift-test:test-byte thrift-cross::*prot* -128) -128)) + (is (= (thrift.test.thrift-test:test-byte thrift-cross::*prot* 42) 42)) + (is (= (thrift.test.thrift-test:test-byte thrift-cross::*prot* 0) 0)) + (is (= (thrift.test.thrift-test:test-i32 thrift-cross::*prot* 0) 0)) + (is (= (thrift.test.thrift-test:test-i32 thrift-cross::*prot* 2147483647) 2147483647)) + (is (= (thrift.test.thrift-test:test-i32 thrift-cross::*prot* -2147483648) -2147483648)) + (is (= (thrift.test.thrift-test:test-i64 thrift-cross::*prot* 0) 0)) + (is (= (thrift.test.thrift-test:test-i64 thrift-cross::*prot* 9223372036854775807) 9223372036854775807)) + (is (= (thrift.test.thrift-test:test-i64 thrift-cross::*prot* 
-9223372036854775808) -9223372036854775808))) + +(deftest double-test () + (is (= (thrift.test.thrift-test:test-double thrift-cross::*prot* 0.0) 0)) + (is (= (thrift.test.thrift-test:test-double thrift-cross::*prot* 42.0) 42)) + (is (= (thrift.test.thrift-test:test-double thrift-cross::*prot* -555.0) -555)) + (is (= (thrift.test.thrift-test:test-double thrift-cross::*prot* -52.3678) -52.3678))) + +(deftest string-test () + (is (string= (thrift.test.thrift-test:test-string thrift-cross::*prot* "") "")) + (is (string= (thrift.test.thrift-test:test-string thrift-cross::*prot* "(defun botsbuildbots () (botsbuilsbots))") + "(defun botsbuildbots () (botsbuilsbots))")) + (is (string= (thrift.test.thrift-test:test-string thrift-cross::*prot* *lang-string*) *lang-string*)) + (is (string= (thrift.test.thrift-test:test-string thrift-cross::*prot* *trick-string*) *trick-string*))) + +(deftest binary-test () + (is (equalp (thrift.test.thrift-test:test-binary thrift-cross::*prot* #()) #())) + (is (equalp (thrift.test.thrift-test:test-binary thrift-cross::*prot* *binary-sequence*) *binary-sequence*))) + +(deftest enum-test () + (is (= (thrift.test.thrift-test:test-enum thrift-cross::*prot* thrift.test:numberz.five) thrift.test:numberz.five)) + (is (= (thrift.test.thrift-test:test-enum thrift-cross::*prot* thrift.test:numberz.eight) thrift.test:numberz.eight)) + (is (= (thrift.test.thrift-test:test-enum thrift-cross::*prot* thrift.test:numberz.one) thrift.test:numberz.one))) + +(deftest typedef-test () + (is (= (thrift.test.thrift-test:test-typedef thrift-cross::*prot* 309858235082523) 309858235082523))) + +(fiasco:define-test-package #:structs) + +(in-package #:structs) + +(defparameter *test-struct* (thrift.test:make-xtruct :string-thing "Hell is empty." 
+ :byte-thing -2 + :i32-thing 42 + :i64-thing 42424242)) + +(defparameter *test-nest* (thrift.test:make-xtruct2 :byte-thing 42 + :struct-thing *test-struct* + :i32-thing -42)) + +(deftest struct-test () + (let ((rec-struct (thrift.test.thrift-test:test-struct thrift-cross::*prot* *test-struct*))) + (is (string= (thrift.test:xtruct-string-thing *test-struct*) + (thrift.test:xtruct-string-thing rec-struct))) + (is (= (thrift.test:xtruct-byte-thing *test-struct*) + (thrift.test:xtruct-byte-thing rec-struct))) + (is (= (thrift.test:xtruct-i32-thing *test-struct*) + (thrift.test:xtruct-i32-thing rec-struct))) + (is (= (thrift.test:xtruct-i64-thing *test-struct*) + (thrift.test:xtruct-i64-thing rec-struct))))) + +(deftest nest-test () + (let* ((rec-nest (thrift.test.thrift-test:test-nest thrift-cross::*prot* *test-nest*)) + (rec-struct (thrift.test:xtruct2-struct-thing rec-nest))) + (is (string= (thrift.test:xtruct-string-thing *test-struct*) + (thrift.test:xtruct-string-thing rec-struct))) + (is (= (thrift.test:xtruct-byte-thing *test-struct*) + (thrift.test:xtruct-byte-thing rec-struct))) + (is (= (thrift.test:xtruct-i32-thing *test-struct*) + (thrift.test:xtruct-i32-thing rec-struct))) + (is (= (thrift.test:xtruct-i64-thing *test-struct*) + (thrift.test:xtruct-i64-thing rec-struct))) + (is (= (thrift.test:xtruct2-byte-thing *test-nest*) + (thrift.test:xtruct2-byte-thing rec-nest))) + (is (= (thrift.test:xtruct2-i32-thing *test-nest*) + (thrift.test:xtruct2-i32-thing rec-nest))))) + +(fiasco:define-test-package #:containers) + +(in-package #:containers) + +(deftest list-test () + (is (null (thrift.test.thrift-test:test-list thrift-cross::*prot* nil))) + (is (equal (thrift.test.thrift-test:test-list thrift-cross::*prot* '(42 -42 0 5)) '(42 -42 0 5)))) + +(deftest set-test () + (is (null (thrift.test.thrift-test:test-set thrift-cross::*prot* nil))) + (is (equal (sort (thrift.test.thrift-test:test-set thrift-cross::*prot* (list 42 -42 0 5)) #'<) + '(-42 0 5 42)))) + 
+(defun map= (map1 map2 &key (car-predicate #'equal) (cdr-predicate #'equal)) + "Compare two assoc maps according to the predicates given." + (not (set-exclusive-or map1 map2 :test (lambda (el1 el2) + (and (funcall car-predicate + (car el1) + (car el2)) + (funcall cdr-predicate + (cdr el1) + (cdr el2))))))) + +(deftest map-test () + (is (null (thrift.test.thrift-test:test-map thrift-cross::*prot* nil))) + (is (map= (thrift.test.thrift-test:test-map thrift-cross::*prot* '((0 . 1) (42 . -42) (5 . 5))) + '((0 . 1) (42 . -42) (5 . 5)))) + (is (map= (thrift.test.thrift-test:test-map-map thrift-cross::*prot* 42) + '((-4 . ((-4 . -4) (-3 . -3) (-2 . -2) (-1 . -1))) + (4 . ((1 . 1) (2 . 2) (3 . 3) (4 . 4)))) + :cdr-predicate #'map=))) + +(fiasco:define-test-package #:exceptions) + +(in-package #:exceptions) + +(defun test-xception (expected-code expected-message function &rest args) + "A helper function to test whether xception is signalled, and whether its fields have the expected values." + (handler-case (progn (apply function args) + nil) + (thrift.test:xception (ex) (and (= (thrift.test::xception-error-code ex) expected-code) + (string= (thrift.test::xception-message ex) expected-message))))) + +(defun test-xception2 (expected-code expected-message function &rest args) + "A helper function to test whether xception2 is signalled, and whether its fields have the expected values." 
+ (handler-case (progn (apply function args) + nil) + (thrift.test:xception2 (ex) (and (= (thrift.test::xception2-error-code ex) expected-code) + (string= (thrift.test::xtruct-string-thing + (thrift.test::xception2-struct-thing ex)) + expected-message))))) + +(deftest exception-test () + (is (test-xception 1001 "Xception" #'thrift.test.thrift-test:test-exception thrift-cross::*prot* "Xception")) + (signals thrift:application-error (thrift.test.thrift-test:test-exception thrift-cross::*prot* "TException")) + (finishes (thrift.test.thrift-test:test-exception thrift-cross::*prot* "success"))) + +(deftest multi-exception-test () + (is (test-xception 1001 + "This is an Xception" + #'thrift.test.thrift-test:test-multi-exception + thrift-cross::*prot* + "Xception" + "meaningless")) + (is (test-xception2 2002 + "This is an Xception2" + #'thrift.test.thrift-test:test-multi-exception + thrift-cross::*prot* + "Xception2" + "meaningless too!")) + (is (string= "foobar" (thrift.test:xtruct-string-thing + (thrift.test.thrift-test:test-multi-exception thrift-cross::*prot* + "success!" + "foobar"))))) + +(fiasco:define-test-package #:misc) + +(in-package #:misc) + +(deftest oneway-test () + (is (null (thrift.test.thrift-test:test-oneway thrift-cross::*prot* 1)))) + +(fiasco:define-test-package #:multiplex) + +(in-package #:multiplex) + +(deftest multiplex-test () + ;; Removed from the IDL definition. 
+ ;; (finishes (thrift.test.second-service:blah-blah thrift-cross::*prot*)) + (is (string= "asd" (thrift.test.second-service:secondtest-string thrift-cross::*prot* "asd")))) diff --git a/vendor/git.apache.org/thrift.git/test/cpp/src/TestServer.cpp b/vendor/git.apache.org/thrift.git/test/cpp/src/TestServer.cpp index 78b0a742b..1c3812410 100644 --- a/vendor/git.apache.org/thrift.git/test/cpp/src/TestServer.cpp +++ b/vendor/git.apache.org/thrift.git/test/cpp/src/TestServer.cpp @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -48,6 +49,9 @@ #ifdef HAVE_INTTYPES_H #include #endif +#ifdef HAVE_SIGNAL_H +#include +#endif #include #include @@ -58,7 +62,6 @@ #include #include -#include #if _WIN32 #include #endif @@ -74,6 +77,17 @@ using namespace apache::thrift::server; using namespace thrift::test; +// to handle a controlled shutdown, signal handling is mandatory +#ifdef HAVE_SIGNAL_H +apache::thrift::concurrency::Monitor gMonitor; +void signal_handler(int signum) +{ + if (signum == SIGINT) { + gMonitor.notifyAll(); + } +} +#endif + class TestHandler : public ThriftTestIf { public: TestHandler() {} @@ -113,7 +127,7 @@ public: void testBinary(std::string& _return, const std::string& thing) { std::ostringstream hexstr; hexstr << std::hex << thing; - printf("testBinary(%lu: %s)\n", thing.size(), hexstr.str().c_str()); + printf("testBinary(%lu: %s)\n", safe_numeric_cast(thing.size()), hexstr.str().c_str()); _return = thing; } @@ -634,6 +648,12 @@ int main(int argc, char** argv) { ssl = true; } +#if defined(HAVE_SIGNAL_H) && defined(SIGPIPE) + if (ssl) { + signal(SIGPIPE, SIG_IGN); // for OpenSSL, otherwise we end abruptly + } +#endif + if (vm.count("abstract-namespace")) { abstract_namespace = true; } @@ -676,7 +696,9 @@ int main(int argc, char** argv) { sslSocketFactory->loadCertificate(certPath.c_str()); sslSocketFactory->loadPrivateKey(keyPath.c_str()); sslSocketFactory->ciphers("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"); - serverSocket = 
stdcxx::shared_ptr(new TSSLServerSocket(port, sslSocketFactory)); + if (server_type != "nonblocking") { + serverSocket = stdcxx::shared_ptr(new TSSLServerSocket(port, sslSocketFactory)); + } } else { if (domain_socket != "") { if (abstract_namespace) { @@ -738,13 +760,11 @@ int main(int argc, char** argv) { server.reset(new TSimpleServer(testProcessor, serverSocket, transportFactory, protocolFactory)); } else if (server_type == "thread-pool") { - stdcxx::shared_ptr threadManager = ThreadManager::newSimpleThreadManager(workers); - stdcxx::shared_ptr threadFactory = stdcxx::shared_ptr(new PlatformThreadFactory()); + stdcxx::shared_ptr threadManager = ThreadManager::newSimpleThreadManager(workers); threadManager->threadFactory(threadFactory); - threadManager->start(); server.reset(new TThreadPoolServer(testProcessor, @@ -753,7 +773,6 @@ int main(int argc, char** argv) { protocolFactory, threadManager)); } else if (server_type == "threaded") { - server.reset( new TThreadedServer(testProcessor, serverSocket, transportFactory, protocolFactory)); } else if (server_type == "nonblocking") { @@ -769,10 +788,15 @@ int main(int argc, char** argv) { // provide a stop method. TEvhttpServer nonblockingServer(testBufferProcessor, port); nonblockingServer.serve(); - } else { - stdcxx::shared_ptr nbSocket; - nbSocket.reset(new transport::TNonblockingServerSocket(port)); + } else if (transport_type == "framed") { + stdcxx::shared_ptr nbSocket; + nbSocket.reset( + ssl ? 
new transport::TNonblockingSSLServerSocket(port, sslSocketFactory) + : new transport::TNonblockingServerSocket(port)); server.reset(new TNonblockingServer(testProcessor, protocolFactory, nbSocket)); + } else { + cerr << "server-type nonblocking requires transport of http or framed" << endl; + exit(1); } } @@ -782,20 +806,23 @@ int main(int argc, char** argv) { // if using header server->setOutputProtocolFactory(stdcxx::shared_ptr()); } + apache::thrift::concurrency::PlatformThreadFactory factory; factory.setDetached(false); stdcxx::shared_ptr serverThreadRunner(server); stdcxx::shared_ptr thread = factory.newThread(serverThreadRunner); - thread->start(); - // HACK: cross language test suite is unable to handle cin properly - // that's why we stay in a endless loop here - while (1) { - } - // FIXME: find another way to stop the server (e.g. a signal) - // cout<<"Press enter to stop the server."<start(); + gMonitor.waitForever(); // wait for a shutdown signal + +#ifdef HAVE_SIGNAL_H + signal(SIGINT, SIG_DFL); +#endif server->stop(); thread->join(); @@ -805,3 +832,4 @@ int main(int argc, char** argv) { cout << "done." 
<< endl; return 0; } + diff --git a/vendor/git.apache.org/thrift.git/test/crossrunner/collect.py b/vendor/git.apache.org/thrift.git/test/crossrunner/collect.py index 03b0c36c9..e2d897828 100644 --- a/vendor/git.apache.org/thrift.git/test/crossrunner/collect.py +++ b/vendor/git.apache.org/thrift.git/test/crossrunner/collect.py @@ -51,6 +51,7 @@ VALID_JSON_KEYS = [ ] DEFAULT_MAX_DELAY = 5 +DEFAULT_SIGNAL = 1 DEFAULT_TIMEOUT = 5 @@ -112,7 +113,7 @@ def _do_collect_tests(servers, clients): yield name, impl1, impl2 def maybe_max(key, o1, o2, default): - """maximum of two if present, otherwise defult value""" + """maximum of two if present, otherwise default value""" v1 = o1.get(key) v2 = o2.get(key) return max(v1, v2) if v1 and v2 else v1 or v2 or default @@ -138,6 +139,7 @@ def _do_collect_tests(servers, clients): 'server': merge_metadata(sv, **{'protocol': proto1, 'transport': trans1}), 'client': merge_metadata(cl, **{'protocol': proto2, 'transport': trans2}), 'delay': maybe_max('delay', sv, cl, DEFAULT_MAX_DELAY), + 'stop_signal': maybe_max('stop_signal', sv, cl, DEFAULT_SIGNAL), 'timeout': maybe_max('timeout', sv, cl, DEFAULT_TIMEOUT), 'protocol': proto, 'transport': trans, diff --git a/vendor/git.apache.org/thrift.git/test/crossrunner/report.py b/vendor/git.apache.org/thrift.git/test/crossrunner/report.py index 76324ede1..5baf16195 100644 --- a/vendor/git.apache.org/thrift.git/test/crossrunner/report.py +++ b/vendor/git.apache.org/thrift.git/test/crossrunner/report.py @@ -157,9 +157,14 @@ class ExecReporter(TestReporter): ])), 'client': list(map(re.compile, [ '[Cc]onnection refused', - 'Could not connect to localhost', + 'Could not connect to', + 'Could not open UNIX ', # domain socket (rb) 'ECONNREFUSED', + 'econnrefused', # erl + 'CONNECTION-REFUSED-ERROR', # cl + 'connect ENOENT', # nodejs domain socket 'No such file or directory', # domain socket + 'Sockets.TcpClient.Connect', # csharp ])), } @@ -174,6 +179,7 @@ class ExecReporter(TestReporter): def 
match(line): for expr in exprs: if expr.search(line): + self._log.info("maybe false positive: %s" % line) return True with logfile_open(self.logpath, 'r') as fp: @@ -204,7 +210,7 @@ class ExecReporter(TestReporter): def _print_footer(self, returncode=None): self._print_bar() if returncode is not None: - print('Return code: %d' % returncode, file=self.out) + print('Return code: %d (negative values indicate kill by signal)' % returncode, file=self.out) else: print('Process is killed.', file=self.out) self._print_exec_time() @@ -261,7 +267,8 @@ class SummaryReporter(TestReporter): if not with_result: return '{:24s}{:18s}{:25s}'.format(name[:23], test.protocol[:17], trans[:24]) else: - return '{:24s}{:18s}{:25s}{:s}\n'.format(name[:23], test.protocol[:17], trans[:24], self._result_string(test)) + return '{:24s}{:18s}{:25s}{:s}\n'.format(name[:23], test.protocol[:17], + trans[:24], self._result_string(test)) def _print_test_header(self): self._print_bar() diff --git a/vendor/git.apache.org/thrift.git/test/crossrunner/run.py b/vendor/git.apache.org/thrift.git/test/crossrunner/run.py index f522bb19e..25c58cef3 100644 --- a/vendor/git.apache.org/thrift.git/test/crossrunner/run.py +++ b/vendor/git.apache.org/thrift.git/test/crossrunner/run.py @@ -23,19 +23,20 @@ import multiprocessing.managers import os import platform import random -import signal import socket import subprocess import sys -import threading import time from .compat import str_join -from .test import TestEntry, domain_socket_path from .report import ExecReporter, SummaryReporter +from .test import TestEntry +from .util import domain_socket_path -RESULT_TIMEOUT = 128 RESULT_ERROR = 64 +RESULT_TIMEOUT = 128 +SIGNONE = 0 +SIGKILL = 15 # globals ports = None @@ -43,35 +44,18 @@ stop = None class ExecutionContext(object): - def __init__(self, cmd, cwd, env, report): + def __init__(self, cmd, cwd, env, stop_signal, is_server, report): self._log = multiprocessing.get_logger() - self.report = report self.cmd = cmd 
self.cwd = cwd self.env = env - self.timer = None + self.stop_signal = stop_signal + self.is_server = is_server + self.report = report self.expired = False self.killed = False self.proc = None - def _expire(self): - self._log.info('Timeout') - self.expired = True - self.kill() - - def kill(self): - self._log.debug('Killing process : %d' % self.proc.pid) - self.killed = True - if platform.system() != 'Windows': - try: - os.killpg(self.proc.pid, signal.SIGKILL) - except Exception: - self._log.info('Failed to kill process group', exc_info=sys.exc_info()) - try: - self.proc.kill() - except Exception: - self._log.info('Failed to kill process', exc_info=sys.exc_info()) - def _popen_args(self): args = { 'cwd': self.cwd, @@ -87,75 +71,125 @@ class ExecutionContext(object): args.update(preexec_fn=os.setsid) return args - def start(self, timeout=0): + def start(self): joined = str_join(' ', self.cmd) self._log.debug('COMMAND: %s', joined) self._log.debug('WORKDIR: %s', self.cwd) self._log.debug('LOGFILE: %s', self.report.logpath) self.report.begin() self.proc = subprocess.Popen(self.cmd, **self._popen_args()) - if timeout > 0: - self.timer = threading.Timer(timeout, self._expire) - self.timer.start() + self._log.debug(' PID: %d', self.proc.pid) + self._log.debug(' PGID: %d', os.getpgid(self.proc.pid)) return self._scoped() @contextlib.contextmanager def _scoped(self): yield self - self._log.debug('Killing scoped process') - if self.proc.poll() is None: - self.kill() - self.report.killed() + if self.is_server: + # the server is supposed to run until we stop it + if self.returncode is not None: + self.report.died() + else: + if self.stop_signal != SIGNONE: + if self.sigwait(self.stop_signal): + self.report.end(self.returncode) + else: + self.report.killed() + else: + self.sigwait(SIGKILL) else: - self._log.debug('Process died unexpectedly') - self.report.died() + # the client is supposed to exit normally + if self.returncode is not None: + self.report.end(self.returncode) + 
else: + self.sigwait(SIGKILL) + self.report.killed() + self._log.debug('[{0}] exited with return code {1}'.format(self.proc.pid, self.returncode)) - def wait(self): - self.proc.communicate() - if self.timer: - self.timer.cancel() - self.report.end(self.returncode) + # Send a signal to the process and then wait for it to end + # If the signal requested is SIGNONE, no signal is sent, and + # instead we just wait for the process to end; further if it + # does not end normally with SIGNONE, we mark it as expired. + # If the process fails to end and the signal is not SIGKILL, + # it re-runs with SIGKILL so that a real process kill occurs + # returns True if the process ended, False if it may not have + def sigwait(self, sig=SIGKILL, timeout=2): + try: + if sig != SIGNONE: + self._log.debug('[{0}] send signal {1}'.format(self.proc.pid, sig)) + if sig == SIGKILL: + self.killed = True + try: + if platform.system() != 'Windows': + os.killpg(os.getpgid(self.proc.pid), sig) + else: + self.proc.send_signal(sig) + except Exception: + self._log.info('[{0}] Failed to kill process'.format(self.proc.pid), exc_info=sys.exc_info()) + self._log.debug('[{0}] wait begin, timeout {1} sec(s)'.format(self.proc.pid, timeout)) + self.proc.communicate(timeout=timeout) + self._log.debug('[{0}] process ended with return code {1}'.format(self.proc.pid, self.returncode)) + self.report.end(self.returncode) + return True + except subprocess.TimeoutExpired: + self._log.info('[{0}] timeout waiting for process to end'.format(self.proc.pid)) + if sig == SIGNONE: + self.expired = True + return False if sig == SIGKILL else self.sigwait(SIGKILL, 1) + + # called on the client process to wait for it to end naturally + def wait(self, timeout): + self.sigwait(SIGNONE, timeout) @property def returncode(self): return self.proc.returncode if self.proc else None -def exec_context(port, logdir, test, prog): +def exec_context(port, logdir, test, prog, is_server): report = ExecReporter(logdir, test, prog) 
prog.build_command(port) - return ExecutionContext(prog.command, prog.workdir, prog.env, report) + return ExecutionContext(prog.command, prog.workdir, prog.env, prog.stop_signal, is_server, report) def run_test(testdir, logdir, test_dict, max_retry, async=True): logger = multiprocessing.get_logger() - def ensure_socket_open(proc, port, max_delay): - sleeped = 0.1 - time.sleep(sleeped) - sleep_step = 0.2 + def ensure_socket_open(sv, port, test): + slept = 0.1 + time.sleep(slept) + sleep_step = 0.1 while True: - # Create sockets every iteration because refused sockets cannot be - # reused on some systems. - sock4 = socket.socket() - sock6 = socket.socket(family=socket.AF_INET6) - try: - if sock4.connect_ex(('127.0.0.1', port)) == 0 \ - or sock6.connect_ex(('::1', port)) == 0: - return True - if proc.poll() is not None: - logger.warn('server process is exited') - return False - if sleeped > max_delay: - logger.warn('sleeped for %f seconds but server port is not open' % sleeped) - return False - time.sleep(sleep_step) - sleeped += sleep_step - finally: - sock4.close() - sock6.close() - logger.debug('waited %f sec for server port open' % sleeped) - return True + if slept > test.delay: + logger.warn('[{0}] slept for {1} seconds but server is not open'.format(sv.proc.pid, slept)) + return False + if test.socket == 'domain': + if not os.path.exists(domain_socket_path(port)): + logger.debug('[{0}] domain(unix) socket not available yet. slept for {1} seconds so far'.format(sv.proc.pid, slept)) + time.sleep(sleep_step) + slept += sleep_step + elif test.socket == 'abstract': + return True + else: + # Create sockets every iteration because refused sockets cannot be + # reused on some systems. 
+ sock4 = socket.socket() + sock6 = socket.socket(family=socket.AF_INET6) + try: + if sock4.connect_ex(('127.0.0.1', port)) == 0 \ + or sock6.connect_ex(('::1', port)) == 0: + return True + if sv.proc.poll() is not None: + logger.warn('[{0}] server process is exited'.format(sv.proc.pid)) + return False + logger.debug('[{0}] socket not available yet. slept for {1} seconds so far'.format(sv.proc.pid, slept)) + time.sleep(sleep_step) + slept += sleep_step + finally: + sock4.close() + sock6.close() + logger.debug('[{0}] server ready - waited for {1} seconds'.format(sv.proc.pid, slept)) + return True try: max_bind_retry = 3 @@ -169,31 +203,27 @@ def run_test(testdir, logdir, test_dict, max_retry, async=True): logger.debug('Start') with PortAllocator.alloc_port_scoped(ports, test.socket) as port: logger.debug('Start with port %d' % port) - sv = exec_context(port, logdir, test, test.server) - cl = exec_context(port, logdir, test, test.client) + sv = exec_context(port, logdir, test, test.server, True) + cl = exec_context(port, logdir, test, test.client, False) logger.debug('Starting server') with sv.start(): - if test.socket in ('domain', 'abstract'): - time.sleep(0.1) - port_ok = True - else: - port_ok = ensure_socket_open(sv.proc, port, test.delay) + port_ok = ensure_socket_open(sv, port, test) if port_ok: connect_retry_count = 0 - max_connect_retry = 3 - connect_retry_wait = 0.5 + max_connect_retry = 12 + connect_retry_wait = 0.25 while True: if sv.proc.poll() is not None: logger.info('not starting client because server process is absent') break logger.debug('Starting client') - cl.start(test.timeout) - logger.debug('Waiting client') - cl.wait() + cl.start() + logger.debug('Waiting client (up to %d secs)' % test.timeout) + cl.wait(test.timeout) if not cl.report.maybe_false_positive() or connect_retry_count >= max_connect_retry: if connect_retry_count > 0 and connect_retry_count < max_connect_retry: - logger.warn('[%s]: Connected after %d retry (%.2f sec each)' % 
(test.server.name, connect_retry_count, connect_retry_wait)) + logger.info('[%s]: Connected after %d retry (%.2f sec each)' % (test.server.name, connect_retry_count, connect_retry_wait)) # Wait for 50ms to see if server does not die at the end. time.sleep(0.05) break @@ -205,12 +235,18 @@ def run_test(testdir, logdir, test_dict, max_retry, async=True): logger.warn('[%s]: Detected socket bind failure, retrying...', test.server.name) bind_retry_count += 1 else: - if cl.expired: - result = RESULT_TIMEOUT + result = RESULT_TIMEOUT if cl.expired else cl.returncode if cl.proc.poll() is not None else RESULT_ERROR + + # For servers that handle a controlled shutdown by signal + # if they are killed, or return an error code, that is a + # problem. For servers that are not signal-aware, we simply + # kill them off; if we didn't kill them off, something else + # happened (crashed?) + if test.server.stop_signal != 0: + if sv.killed or sv.returncode > 0: + result |= RESULT_ERROR else: - result = cl.proc.returncode if cl.proc else RESULT_ERROR if not sv.killed: - # Server died without being killed. 
result |= RESULT_ERROR if result == 0 or retry_count >= max_retry: diff --git a/vendor/git.apache.org/thrift.git/test/crossrunner/test.py b/vendor/git.apache.org/thrift.git/test/crossrunner/test.py index 74fd916ec..633e92616 100644 --- a/vendor/git.apache.org/thrift.git/test/crossrunner/test.py +++ b/vendor/git.apache.org/thrift.git/test/crossrunner/test.py @@ -22,22 +22,20 @@ import multiprocessing import os import sys from .compat import path_join -from .util import merge_dict - - -def domain_socket_path(port): - return '/tmp/ThriftTest.thrift.%d' % port +from .util import merge_dict, domain_socket_path class TestProgram(object): - def __init__(self, kind, name, protocol, transport, socket, workdir, command, env=None, + def __init__(self, kind, name, protocol, transport, socket, workdir, stop_signal, command, env=None, extra_args=[], extra_args2=[], join_args=False, **kwargs): + self.kind = kind self.name = name self.protocol = protocol self.transport = transport self.socket = socket self.workdir = workdir + self.stop_signal = stop_signal self.command = None self._base_command = self._fix_cmd_path(command) if env: diff --git a/vendor/git.apache.org/thrift.git/test/crossrunner/util.py b/vendor/git.apache.org/thrift.git/test/crossrunner/util.py index e2d195a22..c214df85a 100644 --- a/vendor/git.apache.org/thrift.git/test/crossrunner/util.py +++ b/vendor/git.apache.org/thrift.git/test/crossrunner/util.py @@ -20,6 +20,10 @@ import copy +def domain_socket_path(port): + return '/tmp/ThriftTest.thrift.%d' % port + + def merge_dict(base, update): """Update dict concatenating list values""" res = copy.deepcopy(base) diff --git a/vendor/git.apache.org/thrift.git/test/csharp/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/test/csharp/Properties/AssemblyInfo.cs index e2def42e3..b1101a15f 100644 --- a/vendor/git.apache.org/thrift.git/test/csharp/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/test/csharp/Properties/AssemblyInfo.cs @@ -51,5 
+51,5 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/test/csharp/TestClient.cs b/vendor/git.apache.org/thrift.git/test/csharp/TestClient.cs index 17e59787a..949c06e9f 100644 --- a/vendor/git.apache.org/thrift.git/test/csharp/TestClient.cs +++ b/vendor/git.apache.org/thrift.git/test/csharp/TestClient.cs @@ -62,7 +62,9 @@ namespace Test { string certPath = "../keys/client.p12"; X509Certificate cert = new X509Certificate2(certPath, "thrift"); - trans = new TTLSSocket(host, port, 0, cert, (o, c, chain, errors) => true, null, SslProtocols.Tls); + trans = new TTLSSocket(host, port, 0, cert, + (o, c, chain, errors) => true, + null, SslProtocols.Tls); } else { diff --git a/vendor/git.apache.org/thrift.git/test/csharp/TestServer.cs b/vendor/git.apache.org/thrift.git/test/csharp/TestServer.cs index e9c7168eb..bf645c26c 100644 --- a/vendor/git.apache.org/thrift.git/test/csharp/TestServer.cs +++ b/vendor/git.apache.org/thrift.git/test/csharp/TestServer.cs @@ -455,7 +455,9 @@ namespace Test if (useEncryption) { string certPath = "../keys/server.p12"; - trans = new TTLSServerSocket(port, 0, useBufferedSockets, new X509Certificate2(certPath, "thrift"), null, null, SslProtocols.Tls); + trans = new TTLSServerSocket(port, 0, useBufferedSockets, new X509Certificate2(certPath, "thrift"), + null, + null, SslProtocols.Tls); } else { diff --git a/vendor/git.apache.org/thrift.git/test/csharp/ThriftTest.csproj b/vendor/git.apache.org/thrift.git/test/csharp/ThriftTest.csproj index 8fe40aaa8..65c0daf7d 100644 --- a/vendor/git.apache.org/thrift.git/test/csharp/ThriftTest.csproj +++ 
b/vendor/git.apache.org/thrift.git/test/csharp/ThriftTest.csproj @@ -46,7 +46,7 @@ false true 0 - 0.11.0.%2a + 1.0.0.%2a false true diff --git a/vendor/git.apache.org/thrift.git/test/dart/test_client/pubspec.yaml b/vendor/git.apache.org/thrift.git/test/dart/test_client/pubspec.yaml index e386c0ec1..a91aa455a 100644 --- a/vendor/git.apache.org/thrift.git/test/dart/test_client/pubspec.yaml +++ b/vendor/git.apache.org/thrift.git/test/dart/test_client/pubspec.yaml @@ -16,7 +16,7 @@ # under the License. name: thrift_test_client -version: 0.11.0 +version: 1.0.0-dev description: A client integration test for the Dart Thrift library author: Apache Thrift Developers homepage: http://thrift.apache.org diff --git a/vendor/git.apache.org/thrift.git/test/erl/src/thrift_test.app.src b/vendor/git.apache.org/thrift.git/test/erl/src/thrift_test.app.src index b55297042..7896a9525 100644 --- a/vendor/git.apache.org/thrift.git/test/erl/src/thrift_test.app.src +++ b/vendor/git.apache.org/thrift.git/test/erl/src/thrift_test.app.src @@ -22,7 +22,7 @@ {description, "Thrift cross language test"}, % The version of the applicaton - {vsn, "0.11.0"}, + {vsn, "1.0.0-dev"}, % All modules used by the application. 
{modules, [ diff --git a/vendor/git.apache.org/thrift.git/test/features/known_failures_Linux.json b/vendor/git.apache.org/thrift.git/test/features/known_failures_Linux.json index f96356de7..83769682f 100644 --- a/vendor/git.apache.org/thrift.git/test/features/known_failures_Linux.json +++ b/vendor/git.apache.org/thrift.git/test/features/known_failures_Linux.json @@ -1,46 +1,50 @@ [ - "c_glib-limit_container_length_binary_buffered-ip", - "c_glib-limit_string_length_binary_buffered-ip", - "cpp-theader_framed_binary_multih-header_buffered-ip", - "cpp-theader_framed_compact_multih-header_buffered-ip", - "cpp-theader_unframed_binary_multih-header_buffered-ip", - "cpp-theader_unframed_compact_multih-header_buffered-ip", - "csharp-limit_container_length_binary_buffered-ip", - "csharp-limit_container_length_compact_buffered-ip", - "csharp-limit_string_length_binary_buffered-ip", - "csharp-limit_string_length_compact_buffered-ip", - "d-limit_container_length_binary_buffered-ip", - "d-limit_container_length_compact_buffered-ip", - "d-limit_string_length_binary_buffered-ip", - "d-limit_string_length_compact_buffered-ip", - "erl-limit_container_length_binary_buffered-ip", - "erl-limit_container_length_compact_buffered-ip", - "erl-limit_string_length_binary_buffered-ip", - "erl-limit_string_length_compact_buffered-ip", - "go-limit_container_length_binary_buffered-ip", - "go-limit_container_length_compact_buffered-ip", - "go-limit_string_length_binary_buffered-ip", - "go-limit_string_length_compact_buffered-ip", - "hs-limit_container_length_binary_buffered-ip", - "hs-limit_container_length_compact_buffered-ip", - "hs-limit_string_length_binary_buffered-ip", - "hs-limit_string_length_compact_buffered-ip", - "nodejs-limit_container_length_binary_buffered-ip", - "nodejs-limit_container_length_compact_buffered-ip", - "nodejs-limit_string_length_binary_buffered-ip", - "nodejs-limit_string_length_compact_buffered-ip", - "perl-limit_container_length_binary_buffered-ip", - 
"perl-limit_string_length_binary_buffered-ip", - "rb-limit_container_length_accel-binary_buffered-ip", - "rb-limit_container_length_binary_buffered-ip", - "rb-limit_container_length_compact_buffered-ip", - "rb-limit_string_length_accel-binary_buffered-ip", - "rb-limit_string_length_binary_buffered-ip", - "rb-limit_string_length_compact_buffered-ip", - "rs-limit_container_length_binary_buffered-ip", - "rs-limit_container_length_compact_buffered-ip", - "rs-limit_container_length_multic-compact_buffered-ip", - "rs-limit_string_length_binary_buffered-ip", - "rs-limit_string_length_compact_buffered-ip", - "rs-limit_string_length_multic-compact_buffered-ip" -] \ No newline at end of file + "c_glib-limit_container_length_binary_buffered-ip", + "c_glib-limit_string_length_binary_buffered-ip", + "cl-limit_string_length_binary_buffered-ip", + "cl-limit_container_length_binary_buffered-ip", + "cpp-theader_framed_binary_multih-header_buffered-ip", + "cpp-theader_framed_compact_multih-header_buffered-ip", + "cpp-theader_unframed_binary_multih-header_buffered-ip", + "cpp-theader_unframed_compact_multih-header_buffered-ip", + "csharp-limit_container_length_binary_buffered-ip", + "csharp-limit_container_length_compact_buffered-ip", + "csharp-limit_string_length_binary_buffered-ip", + "csharp-limit_string_length_compact_buffered-ip", + "d-limit_container_length_binary_buffered-ip", + "d-limit_container_length_compact_buffered-ip", + "d-limit_string_length_binary_buffered-ip", + "d-limit_string_length_compact_buffered-ip", + "erl-limit_container_length_binary_buffered-ip", + "erl-limit_container_length_compact_buffered-ip", + "erl-limit_string_length_binary_buffered-ip", + "erl-limit_string_length_compact_buffered-ip", + "go-limit_container_length_binary_buffered-ip", + "go-limit_container_length_compact_buffered-ip", + "go-limit_string_length_binary_buffered-ip", + "go-limit_string_length_compact_buffered-ip", + "hs-limit_container_length_binary_buffered-ip", + 
"hs-limit_container_length_compact_buffered-ip", + "hs-limit_string_length_binary_buffered-ip", + "hs-limit_string_length_compact_buffered-ip", + "nodejs-limit_container_length_binary_buffered-ip", + "nodejs-limit_container_length_compact_buffered-ip", + "nodejs-limit_string_length_binary_buffered-ip", + "nodejs-limit_string_length_compact_buffered-ip", + "perl-limit_container_length_binary_buffered-ip", + "perl-limit_string_length_binary_buffered-ip", + "rb-limit_container_length_accel-binary_buffered-ip", + "rb-limit_container_length_binary_buffered-ip", + "rb-limit_container_length_compact_buffered-ip", + "rb-limit_string_length_accel-binary_buffered-ip", + "rb-limit_string_length_binary_buffered-ip", + "rb-limit_string_length_compact_buffered-ip", + "rs-limit_container_length_binary_buffered-ip", + "rs-limit_container_length_compact_buffered-ip", + "rs-limit_container_length_multic-compact_buffered-ip", + "rs-limit_string_length_binary_buffered-ip", + "rs-limit_string_length_compact_buffered-ip", + "rs-limit_string_length_multic-compact_buffered-ip", + "netcore-limit_string_length_compact_buffered-ip", + "netcore-limit_container_length_compact_buffered-ip" +] diff --git a/vendor/git.apache.org/thrift.git/test/go/Makefile.am b/vendor/git.apache.org/thrift.git/test/go/Makefile.am index 6bc97f582..6da83394b 100644 --- a/vendor/git.apache.org/thrift.git/test/go/Makefile.am +++ b/vendor/git.apache.org/thrift.git/test/go/Makefile.am @@ -18,9 +18,6 @@ # BUILT_SOURCES = gopath -if GOVERSION_LT_17 -COMPILER_EXTRAFLAG=",legacy_context" -endif THRIFTCMD = $(THRIFT) -out src/gen --gen go:thrift_import=thrift$(COMPILER_EXTRAFLAG) THRIFTTEST = $(top_srcdir)/test/ThriftTest.thrift @@ -37,7 +34,6 @@ gopath: $(THRIFT) ThriftTest.thrift mkdir -p src/gen $(THRIFTCMD) ThriftTest.thrift $(THRIFTCMD) ../StressTest.thrift - GOPATH=`pwd` $(GO) get golang.org/x/net/context GOPATH=`pwd` $(GO) get github.com/golang/mock/gomock || true sed -i 
's/\"context\"/\"golang.org\/x\/net\/context\"/g' src/github.com/golang/mock/gomock/controller.go || true GOPATH=`pwd` $(GO) get github.com/golang/mock/gomock diff --git a/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/go17.go b/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/go17.go deleted file mode 100644 index 81f1ad8ee..000000000 --- a/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/go17.go +++ /dev/null @@ -1,62 +0,0 @@ -// +build go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package main - -import ( - "context" - "sync/atomic" -) - -type handler struct{} - -func (h *handler) EchoVoid(ctx context.Context) (err error) { - atomic.AddInt64(&counter, 1) - return nil -} -func (h *handler) EchoByte(ctx context.Context, arg int8) (r int8, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoI32(ctx context.Context, arg int32) (r int32, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoI64(ctx context.Context, arg int64) (r int64, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoString(ctx context.Context, arg string) (r string, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoList(ctx context.Context, arg []int8) (r []int8, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoSet(ctx context.Context, arg map[int8]struct{}) (r map[int8]struct{}, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoMap(ctx context.Context, arg map[int8]int8) (r map[int8]int8, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} diff --git a/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/main.go b/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/main.go index e8e6b2a20..f2e0f2073 100644 --- a/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/main.go +++ b/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/main.go @@ -20,6 +20,7 @@ package main import ( + "context" "flag" "fmt" "gen/stress" @@ -216,3 +217,38 @@ func client(protocolFactory thrift.TProtocolFactory) { done.Done() } + +type handler struct{} + +func (h *handler) EchoVoid(ctx context.Context) (err error) { + atomic.AddInt64(&counter, 1) + return nil +} +func (h *handler) EchoByte(ctx context.Context, arg int8) (r int8, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} +func (h *handler) EchoI32(ctx context.Context, arg int32) 
(r int32, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} +func (h *handler) EchoI64(ctx context.Context, arg int64) (r int64, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} +func (h *handler) EchoString(ctx context.Context, arg string) (r string, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} +func (h *handler) EchoList(ctx context.Context, arg []int8) (r []int8, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} +func (h *handler) EchoSet(ctx context.Context, arg map[int8]struct{}) (r map[int8]struct{}, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} +func (h *handler) EchoMap(ctx context.Context, arg map[int8]int8) (r map[int8]int8, err error) { + atomic.AddInt64(&counter, 1) + return arg, nil +} diff --git a/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/pre_go17.go b/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/pre_go17.go deleted file mode 100644 index 07ae5c6a2..000000000 --- a/vendor/git.apache.org/thrift.git/test/go/src/bin/stress/pre_go17.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package main - -import ( - "sync/atomic" - - "golang.org/x/net/context" -) - -type handler struct{} - -func (h *handler) EchoVoid(ctx context.Context) (err error) { - atomic.AddInt64(&counter, 1) - return nil -} -func (h *handler) EchoByte(ctx context.Context, arg int8) (r int8, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoI32(ctx context.Context, arg int32) (r int32, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoI64(ctx context.Context, arg int64) (r int64, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoString(ctx context.Context, arg string) (r string, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoList(ctx context.Context, arg []int8) (r []int8, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoSet(ctx context.Context, arg map[int8]struct{}) (r map[int8]struct{}, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} -func (h *handler) EchoMap(ctx context.Context, arg map[int8]int8) (r map[int8]int8, err error) { - atomic.AddInt64(&counter, 1) - return arg, nil -} diff --git a/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/main.go b/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/main.go index ab24cbfc7..20104f9e1 100644 --- a/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/main.go +++ b/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/main.go @@ -21,6 +21,7 @@ package main import ( "common" + "context" "flag" "gen/thrifttest" t "log" @@ -60,6 +61,7 @@ var xxs = &thrifttest.Xtruct{ } var xcept = &thrifttest.Xception{ErrorCode: 1001, Message: "Xception"} +var defaultCtx = context.Background() func callEverything(client *thrifttest.ThriftTestClient) { var err error diff --git a/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/pre_go17.go 
b/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/pre_go17.go deleted file mode 100644 index 10a6fb8d9..000000000 --- a/vendor/git.apache.org/thrift.git/test/go/src/bin/testclient/pre_go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package main - -import "golang.org/x/net/context" - -var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/test/go/src/common/clientserver_test.go b/vendor/git.apache.org/thrift.git/test/go/src/common/clientserver_test.go index c4cfd44f3..9f93c4c6c 100644 --- a/vendor/git.apache.org/thrift.git/test/go/src/common/clientserver_test.go +++ b/vendor/git.apache.org/thrift.git/test/go/src/common/clientserver_test.go @@ -20,6 +20,7 @@ package common import ( + "context" "errors" "gen/thrifttest" "reflect" @@ -95,6 +96,7 @@ var xxs = &thrifttest.Xtruct{ } var xcept = &thrifttest.Xception{ErrorCode: 1001, Message: "some"} +var defaultCtx = context.Background() func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, handler *MockThriftTest) { gomock.InOrder( @@ -127,12 +129,12 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h ) var err error if err = client.TestVoid(defaultCtx); err != nil { - t.Errorf("Unexpected error in TestVoid() call: ", err) + t.Errorf("Unexpected error in TestVoid() call: %s", err) } thing, err := client.TestString(defaultCtx, "thing") if err != nil { - t.Errorf("Unexpected error in TestString() call: ", err) + t.Errorf("Unexpected error in TestString() call: %s", err) } if thing != "thing" { t.Errorf("Unexpected TestString() result, expected 'thing' got '%s' ", thing) @@ -140,22 +142,22 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h bl, err := client.TestBool(defaultCtx, true) if err != nil { - t.Errorf("Unexpected error in TestBool() call: ", err) + t.Errorf("Unexpected error in TestBool() call: %s", err) } if !bl { - t.Errorf("Unexpected TestBool() result expected true, got %f ", bl) + t.Errorf("Unexpected TestBool() result expected true, got %v ", bl) } bl, err = client.TestBool(defaultCtx, false) if err != nil { - t.Errorf("Unexpected error in TestBool() call: ", err) + t.Errorf("Unexpected error in TestBool() call: 
%s", err) } if bl { - t.Errorf("Unexpected TestBool() result expected false, got %f ", bl) + t.Errorf("Unexpected TestBool() result expected false, got %v ", bl) } b, err := client.TestByte(defaultCtx, 42) if err != nil { - t.Errorf("Unexpected error in TestByte() call: ", err) + t.Errorf("Unexpected error in TestByte() call: %s", err) } if b != 42 { t.Errorf("Unexpected TestByte() result expected 42, got %d ", b) @@ -163,7 +165,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h i32, err := client.TestI32(defaultCtx, 4242) if err != nil { - t.Errorf("Unexpected error in TestI32() call: ", err) + t.Errorf("Unexpected error in TestI32() call: %s", err) } if i32 != 4242 { t.Errorf("Unexpected TestI32() result expected 4242, got %d ", i32) @@ -171,7 +173,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h i64, err := client.TestI64(defaultCtx, 424242) if err != nil { - t.Errorf("Unexpected error in TestI64() call: ", err) + t.Errorf("Unexpected error in TestI64() call: %s", err) } if i64 != 424242 { t.Errorf("Unexpected TestI64() result expected 424242, got %d ", i64) @@ -179,7 +181,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h d, err := client.TestDouble(defaultCtx, 42.42) if err != nil { - t.Errorf("Unexpected error in TestDouble() call: ", err) + t.Errorf("Unexpected error in TestDouble() call: %s", err) } if d != 42.42 { t.Errorf("Unexpected TestDouble() result expected 42.42, got %f ", d) @@ -194,7 +196,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h xs.I64Thing = 424242 xsret, err := client.TestStruct(defaultCtx, xs) if err != nil { - t.Errorf("Unexpected error in TestStruct() call: ", err) + t.Errorf("Unexpected error in TestStruct() call: %s", err) } if *xs != *xsret { t.Errorf("Unexpected TestStruct() result expected %#v, got %#v ", xs, xsret) @@ -204,7 +206,7 @@ func callEverythingWithMock(t *testing.T, client 
*thrifttest.ThriftTestClient, h x2.StructThing = xs x2ret, err := client.TestNest(defaultCtx, x2) if err != nil { - t.Errorf("Unexpected error in TestNest() call: ", err) + t.Errorf("Unexpected error in TestNest() call: %s", err) } if !reflect.DeepEqual(x2, x2ret) { t.Errorf("Unexpected TestNest() result expected %#v, got %#v ", x2, x2ret) @@ -213,7 +215,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h m := map[int32]int32{1: 2, 3: 4, 5: 42} mret, err := client.TestMap(defaultCtx, m) if err != nil { - t.Errorf("Unexpected error in TestMap() call: ", err) + t.Errorf("Unexpected error in TestMap() call: %s", err) } if !reflect.DeepEqual(m, mret) { t.Errorf("Unexpected TestMap() result expected %#v, got %#v ", m, mret) @@ -222,7 +224,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h sm := map[string]string{"a": "2", "b": "blah", "some": "thing"} smret, err := client.TestStringMap(defaultCtx, sm) if err != nil { - t.Errorf("Unexpected error in TestStringMap() call: ", err) + t.Errorf("Unexpected error in TestStringMap() call: %s", err) } if !reflect.DeepEqual(sm, smret) { t.Errorf("Unexpected TestStringMap() result expected %#v, got %#v ", sm, smret) @@ -231,7 +233,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h s := []int32{1, 2, 42} sret, err := client.TestSet(defaultCtx, s) if err != nil { - t.Errorf("Unexpected error in TestSet() call: ", err) + t.Errorf("Unexpected error in TestSet() call: %s", err) } // Sets can be in any order, but Go slices are ordered, so reflect.DeepEqual won't work. 
stemp := map[int32]struct{}{} @@ -247,7 +249,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h l := []int32{1, 2, 42} lret, err := client.TestList(defaultCtx, l) if err != nil { - t.Errorf("Unexpected error in TestList() call: ", err) + t.Errorf("Unexpected error in TestList() call: %s", err) } if !reflect.DeepEqual(l, lret) { t.Errorf("Unexpected TestList() result expected %#v, got %#v ", l, lret) @@ -255,7 +257,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h eret, err := client.TestEnum(defaultCtx, thrifttest.Numberz_TWO) if err != nil { - t.Errorf("Unexpected error in TestEnum() call: ", err) + t.Errorf("Unexpected error in TestEnum() call: %s", err) } if eret != thrifttest.Numberz_TWO { t.Errorf("Unexpected TestEnum() result expected %#v, got %#v ", thrifttest.Numberz_TWO, eret) @@ -263,7 +265,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h tret, err := client.TestTypedef(defaultCtx, thrifttest.UserId(42)) if err != nil { - t.Errorf("Unexpected error in TestTypedef() call: ", err) + t.Errorf("Unexpected error in TestTypedef() call: %s", err) } if tret != thrifttest.UserId(42) { t.Errorf("Unexpected TestTypedef() result expected %#v, got %#v ", thrifttest.UserId(42), tret) @@ -271,7 +273,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h mapmap, err := client.TestMapMap(defaultCtx, 42) if err != nil { - t.Errorf("Unexpected error in TestMapmap() call: ", err) + t.Errorf("Unexpected error in TestMapmap() call: %s", err) } if !reflect.DeepEqual(mapmap, rmapmap) { t.Errorf("Unexpected TestMapmap() result expected %#v, got %#v ", rmapmap, mapmap) @@ -279,7 +281,7 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h xxsret, err := client.TestMulti(defaultCtx, 42, 4242, 424242, map[int16]string{1: "blah", 2: "thing"}, thrifttest.Numberz_EIGHT, thrifttest.UserId(24)) if err != nil { - 
t.Errorf("Unexpected error in TestMulti() call: %v", err) + t.Errorf("Unexpected error in TestMulti() call: %s", err) } if !reflect.DeepEqual(xxs, xxsret) { t.Errorf("Unexpected TestMulti() result expected %#v, got %#v ", xxs, xxsret) @@ -323,11 +325,11 @@ func callEverythingWithMock(t *testing.T, client *thrifttest.ThriftTestClient, h err = client.TestOneway(defaultCtx, 2) if err != nil { - t.Errorf("Unexpected error in TestOneway() call: ", err) + t.Errorf("Unexpected error in TestOneway() call: %s", err) } //Make sure the connection still alive if err = client.TestVoid(defaultCtx); err != nil { - t.Errorf("Unexpected error in TestVoid() call: ", err) + t.Errorf("Unexpected error in TestVoid() call: %s", err) } } diff --git a/vendor/git.apache.org/thrift.git/test/go/src/common/context_test.go b/vendor/git.apache.org/thrift.git/test/go/src/common/context_test.go new file mode 100644 index 000000000..e64dbb9ad --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/go/src/common/context_test.go @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package common + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "os" + "syscall" + "testing" + "thrift" + "time" +) + +type slowHttpHandler struct{} + +func (slowHttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + time.Sleep(1 * time.Second) +} + +func TestHttpContextTimeout(t *testing.T) { + certPath = "../../../keys" + + unit := test_unit{"127.0.0.1", 9096, "", "http", "binary", false} + + server := &http.Server{Addr: unit.host + fmt.Sprintf(":%d", unit.port), Handler: slowHttpHandler{}} + go server.ListenAndServe() + + client, trans, err := StartClient(unit.host, unit.port, unit.domain_socket, unit.transport, unit.protocol, unit.ssl) + if err != nil { + t.Errorf("Unable to start client: %v", err) + return + } + defer trans.Close() + + unwrapErr := func(err error) error { + for { + switch err.(type) { + case thrift.TTransportException: + err = err.(thrift.TTransportException).Err() + case *url.Error: + err = err.(*url.Error).Err + case *net.OpError: + err = err.(*net.OpError).Err + case *os.SyscallError: + err = err.(*os.SyscallError).Err + default: + return err + } + } + } + + serverStartupDeadline := time.Now().Add(5 * time.Second) + for { + ctx, _ := context.WithTimeout(context.Background(), 50*time.Millisecond) + err = client.TestVoid(ctx) + err = unwrapErr(err) + if err != syscall.ECONNREFUSED || time.Now().After(serverStartupDeadline) { + break + } + time.Sleep(time.Millisecond) + } + + if err == nil { + t.Errorf("Request completed (should have timed out)") + return + } + + // We've got to switch on `err.Error()` here since go1.7 doesn't properly return + // `context.DeadlineExceeded` error and `http.errRequestCanceled` is not exported. 
+ // See https://github.com/golang/go/issues/17711 + switch err.Error() { + case context.DeadlineExceeded.Error(), "net/http: request canceled": + // Expected error + default: + t.Errorf("Unexpected error: %s", err) + } +} diff --git a/vendor/git.apache.org/thrift.git/test/go/src/common/go17.go b/vendor/git.apache.org/thrift.git/test/go/src/common/go17.go deleted file mode 100644 index 9aca4075c..000000000 --- a/vendor/git.apache.org/thrift.git/test/go/src/common/go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package common - -import "context" - -var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/test/go/src/common/pre_go17.go b/vendor/git.apache.org/thrift.git/test/go/src/common/pre_go17.go deleted file mode 100644 index 6c14579d2..000000000 --- a/vendor/git.apache.org/thrift.git/test/go/src/common/pre_go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package common - -import "golang.org/x/net/context" - -var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler.go b/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler.go index c0a286267..2b22d0c97 100644 --- a/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler.go +++ b/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler.go @@ -1,5 +1,3 @@ -// +build !go1.7 - /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -22,13 +20,12 @@ package common import ( + "context" + "encoding/hex" "errors" "fmt" - "encoding/hex" . 
"gen/thrifttest" "time" - - "golang.org/x/net/context" ) var PrintingHandler = &printingHandler{} @@ -280,11 +277,11 @@ func (p *printingHandler) TestMapMap(ctx context.Context, hello int32) (r map[in func (p *printingHandler) TestInsanity(ctx context.Context, argument *Insanity) (r map[UserId]map[Numberz]*Insanity, err error) { fmt.Printf("testInsanity()\n") r = make(map[UserId]map[Numberz]*Insanity) - r[1] = map[Numberz]*Insanity { + r[1] = map[Numberz]*Insanity{ 2: argument, 3: argument, } - r[2] = map[Numberz]*Insanity { + r[2] = map[Numberz]*Insanity{ 6: NewInsanity(), } return diff --git a/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler_go17.go b/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler_go17.go deleted file mode 100644 index 1efae8676..000000000 --- a/vendor/git.apache.org/thrift.git/test/go/src/common/printing_handler_go17.go +++ /dev/null @@ -1,386 +0,0 @@ -// +build go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package common - -import ( - "context" - "errors" - "fmt" - "encoding/hex" - . "gen/thrifttest" - "time" -) - -var PrintingHandler = &printingHandler{} - -type printingHandler struct{} - -// Prints "testVoid()" and returns nothing. 
-func (p *printingHandler) TestVoid(ctx context.Context) (err error) { - fmt.Println("testVoid()") - return nil -} - -// Prints 'testString("%s")' with thing as '%s' -// @param string thing - the string to print -// @return string - returns the string 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestString(ctx context.Context, thing string) (r string, err error) { - fmt.Printf("testString(\"%s\")\n", thing) - return thing, nil -} - -// Prints 'testBool("%t")' with thing as 'true' or 'false' -// @param bool thing - the bool to print -// @return bool - returns the bool 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestBool(ctx context.Context, thing bool) (r bool, err error) { - fmt.Printf("testBool(%t)\n", thing) - return thing, nil -} - -// Prints 'testByte("%d")' with thing as '%d' -// @param byte thing - the byte to print -// @return byte - returns the byte 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestByte(ctx context.Context, thing int8) (r int8, err error) { - fmt.Printf("testByte(%d)\n", thing) - return thing, nil -} - -// Prints 'testI32("%d")' with thing as '%d' -// @param i32 thing - the i32 to print -// @return i32 - returns the i32 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestI32(ctx context.Context, thing int32) (r int32, err error) { - fmt.Printf("testI32(%d)\n", thing) - return thing, nil -} - -// Prints 'testI64("%d")' with thing as '%d' -// @param i64 thing - the i64 to print -// @return i64 - returns the i64 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestI64(ctx context.Context, thing int64) (r int64, err error) { - fmt.Printf("testI64(%d)\n", thing) - return thing, nil -} - -// Prints 'testDouble("%f")' with thing as '%f' -// @param double thing - the double to print -// @return double - returns the double 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestDouble(ctx context.Context, thing float64) (r float64, 
err error) { - fmt.Printf("testDouble(%f)\n", thing) - return thing, nil -} - -// Prints 'testBinary("%s")' where '%s' is a hex-formatted string of thing's data -// @param []byte thing - the binary to print -// @return []byte - returns the binary 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestBinary(ctx context.Context, thing []byte) (r []byte, err error) { - fmt.Printf("testBinary(%s)\n", hex.EncodeToString(thing)) - return thing, nil -} - -// Prints 'testStruct("{%s}")' where thing has been formatted into a string of comma separated values -// @param Xtruct thing - the Xtruct to print -// @return Xtruct - returns the Xtruct 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestStruct(ctx context.Context, thing *Xtruct) (r *Xtruct, err error) { - fmt.Printf("testStruct({\"%s\", %d, %d, %d})\n", thing.StringThing, thing.ByteThing, thing.I32Thing, thing.I64Thing) - return thing, err -} - -// Prints 'testNest("{%s}")' where thing has been formatted into a string of the nested struct -// @param Xtruct2 thing - the Xtruct2 to print -// @return Xtruct2 - returns the Xtruct2 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestNest(ctx context.Context, nest *Xtruct2) (r *Xtruct2, err error) { - thing := nest.StructThing - fmt.Printf("testNest({%d, {\"%s\", %d, %d, %d}, %d})\n", nest.ByteThing, thing.StringThing, thing.ByteThing, thing.I32Thing, thing.I64Thing, nest.I32Thing) - return nest, nil -} - -// Prints 'testMap("{%s")' where thing has been formatted into a string of 'key => value' pairs -// separated by commas and new lines -// @param map thing - the map to print -// @return map - returns the map 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestMap(ctx context.Context, thing map[int32]int32) (r map[int32]int32, err error) { - fmt.Printf("testMap({") - first := true - for k, v := range thing { - if first { - first = false - } else { - fmt.Printf(", ") - } - fmt.Printf("%d => %d", k, v) 
- } - fmt.Printf("})\n") - return thing, nil -} - -// Prints 'testStringMap("{%s}")' where thing has been formatted into a string of 'key => value' pairs -// separated by commas and new lines -// @param map thing - the map to print -// @return map - returns the map 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestStringMap(ctx context.Context, thing map[string]string) (r map[string]string, err error) { - fmt.Printf("testStringMap({") - first := true - for k, v := range thing { - if first { - first = false - } else { - fmt.Printf(", ") - } - fmt.Printf("%s => %s", k, v) - } - fmt.Printf("})\n") - return thing, nil -} - -// Prints 'testSet("{%s}")' where thing has been formatted into a string of values -// separated by commas and new lines -// @param set thing - the set to print -// @return set - returns the set 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestSet(ctx context.Context, thing []int32) (r []int32, err error) { - fmt.Printf("testSet({") - first := true - for k, _ := range thing { - if first { - first = false - } else { - fmt.Printf(", ") - } - fmt.Printf("%d", k) - } - fmt.Printf("})\n") - return thing, nil -} - -// Prints 'testList("{%s}")' where thing has been formatted into a string of values -// separated by commas and new lines -// @param list thing - the list to print -// @return list - returns the list 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestList(ctx context.Context, thing []int32) (r []int32, err error) { - fmt.Printf("testList({") - for i, v := range thing { - if i != 0 { - fmt.Printf(", ") - } - fmt.Printf("%d", v) - } - fmt.Printf("})\n") - return thing, nil -} - -// Prints 'testEnum("%d")' where thing has been formatted into it's numeric value -// @param Numberz thing - the Numberz to print -// @return Numberz - returns the Numberz 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestEnum(ctx context.Context, thing Numberz) (r Numberz, err error) { - 
fmt.Printf("testEnum(%d)\n", thing) - return thing, nil -} - -// Prints 'testTypedef("%d")' with thing as '%d' -// @param UserId thing - the UserId to print -// @return UserId - returns the UserId 'thing' -// -// Parameters: -// - Thing -func (p *printingHandler) TestTypedef(ctx context.Context, thing UserId) (r UserId, err error) { - fmt.Printf("testTypedef(%d)\n", thing) - return thing, nil -} - -// Prints 'testMapMap("%d")' with hello as '%d' -// @param i32 hello - the i32 to print -// @return map> - returns a dictionary with these values: -// {-4 => {-4 => -4, -3 => -3, -2 => -2, -1 => -1, }, 4 => {1 => 1, 2 => 2, 3 => 3, 4 => 4, }, } -// -// Parameters: -// - Hello -func (p *printingHandler) TestMapMap(ctx context.Context, hello int32) (r map[int32]map[int32]int32, err error) { - fmt.Printf("testMapMap(%d)\n", hello) - - r = map[int32]map[int32]int32{ - -4: map[int32]int32{-4: -4, -3: -3, -2: -2, -1: -1}, - 4: map[int32]int32{4: 4, 3: 3, 2: 2, 1: 1}, - } - return -} - -// So you think you've got this all worked, out eh? 
-// -// Creates a the returned map with these values and prints it out: -// { 1 => { 2 => argument, -// 3 => argument, -// }, -// 2 => { 6 => , }, -// } -// @return map> - a map with the above values -// -// Parameters: -// - Argument -func (p *printingHandler) TestInsanity(ctx context.Context, argument *Insanity) (r map[UserId]map[Numberz]*Insanity, err error) { - fmt.Printf("testInsanity()\n") - r = make(map[UserId]map[Numberz]*Insanity) - r[1] = map[Numberz]*Insanity { - 2: argument, - 3: argument, - } - r[2] = map[Numberz]*Insanity { - 6: NewInsanity(), - } - return -} - -// Prints 'testMulti()' -// @param byte arg0 - -// @param i32 arg1 - -// @param i64 arg2 - -// @param map arg3 - -// @param Numberz arg4 - -// @param UserId arg5 - -// @return Xtruct - returns an Xtruct with StringThing = "Hello2, ByteThing = arg0, I32Thing = arg1 -// and I64Thing = arg2 -// -// Parameters: -// - Arg0 -// - Arg1 -// - Arg2 -// - Arg3 -// - Arg4 -// - Arg5 -func (p *printingHandler) TestMulti(ctx context.Context, arg0 int8, arg1 int32, arg2 int64, arg3 map[int16]string, arg4 Numberz, arg5 UserId) (r *Xtruct, err error) { - fmt.Printf("testMulti()\n") - r = NewXtruct() - - r.StringThing = "Hello2" - r.ByteThing = arg0 - r.I32Thing = arg1 - r.I64Thing = arg2 - return -} - -// Print 'testException(%s)' with arg as '%s' -// @param string arg - a string indication what type of exception to throw -// if arg == "Xception" throw Xception with errorCode = 1001 and message = arg -// elsen if arg == "TException" throw TException -// else do not throw anything -// -// Parameters: -// - Arg -func (p *printingHandler) TestException(ctx context.Context, arg string) (err error) { - fmt.Printf("testException(%s)\n", arg) - switch arg { - case "Xception": - e := NewXception() - e.ErrorCode = 1001 - e.Message = arg - return e - case "TException": - return errors.New("Just TException") - } - return -} - -// Print 'testMultiException(%s, %s)' with arg0 as '%s' and arg1 as '%s' -// @param string arg 
- a string indication what type of exception to throw -// if arg0 == "Xception" throw Xception with errorCode = 1001 and message = "This is an Xception" -// elsen if arg0 == "Xception2" throw Xception2 with errorCode = 2002 and message = "This is an Xception2" -// else do not throw anything -// @return Xtruct - an Xtruct with StringThing = arg1 -// -// Parameters: -// - Arg0 -// - Arg1 -func (p *printingHandler) TestMultiException(ctx context.Context, arg0 string, arg1 string) (r *Xtruct, err error) { - fmt.Printf("testMultiException(%s, %s)\n", arg0, arg1) - switch arg0 { - - case "Xception": - e := NewXception() - e.ErrorCode = 1001 - e.Message = "This is an Xception" - return nil, e - case "Xception2": - e := NewXception2() - e.ErrorCode = 2002 - e.StructThing = NewXtruct() - e.StructThing.StringThing = "This is an Xception2" - return nil, e - default: - r = NewXtruct() - r.StringThing = arg1 - return - } -} - -// Print 'testOneway(%d): Sleeping...' with secondsToSleep as '%d' -// sleep 'secondsToSleep' -// Print 'testOneway(%d): done sleeping!' with secondsToSleep as '%d' -// @param i32 secondsToSleep - the number of seconds to sleep -// -// Parameters: -// - SecondsToSleep -func (p *printingHandler) TestOneway(ctx context.Context, secondsToSleep int32) (err error) { - fmt.Printf("testOneway(%d): Sleeping...\n", secondsToSleep) - time.Sleep(time.Second * time.Duration(secondsToSleep)) - fmt.Printf("testOneway(%d): done sleeping!\n", secondsToSleep) - return -} diff --git a/vendor/git.apache.org/thrift.git/test/haxe/make_all.sh b/vendor/git.apache.org/thrift.git/test/haxe/make_all.sh old mode 100644 new mode 100755 index 262125877..eb2c9c2c7 --- a/vendor/git.apache.org/thrift.git/test/haxe/make_all.sh +++ b/vendor/git.apache.org/thrift.git/test/haxe/make_all.sh @@ -19,7 +19,7 @@ # # invoke Thrift comnpiler -thrift -r -gen haxe ../ThriftTest.thrift +../../compiler/cpp/thrift -r -gen haxe ../ThriftTest.thrift # output folder if [ ! 
-d bin ]; then diff --git a/vendor/git.apache.org/thrift.git/test/known_failures_Linux.json b/vendor/git.apache.org/thrift.git/test/known_failures_Linux.json index 754535f12..f7676498a 100644 --- a/vendor/git.apache.org/thrift.git/test/known_failures_Linux.json +++ b/vendor/git.apache.org/thrift.git/test/known_failures_Linux.json @@ -11,28 +11,24 @@ "c_glib-rs_multi_framed-ip", "c_glib-rs_multic_buffered-ip", "c_glib-rs_multic_framed-ip", - "cpp-cpp_binary_http-domain", - "cpp-cpp_compact_http-domain", - "cpp-cpp_compact_http-ip", - "cpp-cpp_header_http-domain", - "cpp-cpp_json_http-domain", - "cpp-cpp_json_http-ip", - "cpp-cpp_multi-binary_http-domain", - "cpp-cpp_multi-binary_http-ip", - "cpp-cpp_multi_http-domain", - "cpp-cpp_multi_http-ip", - "cpp-cpp_multic-compact_http-domain", - "cpp-cpp_multic-compact_http-ip", - "cpp-cpp_multic_http-domain", - "cpp-cpp_multic_http-ip", - "cpp-cpp_multih-header_http-domain", - "cpp-cpp_multih-header_http-ip", - "cpp-cpp_multih_http-domain", - "cpp-cpp_multih_http-ip", - "cpp-cpp_multij-json_http-domain", - "cpp-cpp_multij-json_http-ip", - "cpp-cpp_multij_http-domain", - "cpp-cpp_multij_http-ip", + "cl-c_glib_binary_buffered-ip", + "cl-c_glib_binary_framed-ip", + "cl-c_glib_multi-binary_buffered-ip", + "cl-c_glib_multi-binary_framed-ip", + "cl-c_glib_multi_buffered-ip", + "cl-c_glib_multi_framed-ip", + "cl-go_binary_buffered-ip", + "cl-go_binary_framed-ip", + "cl-rb_binary-accel_buffered-ip", + "cl-rb_binary-accel_framed-ip", + "cl-rb_binary_buffered-ip", + "cl-rb_binary_framed-ip", + "cl-rs_binary_buffered-ip", + "cl-rs_binary_framed-ip", + "cl-rs_multi-binary_buffered-ip", + "cl-rs_multi-binary_framed-ip", + "cl-rs_multi_buffered-ip", + "cl-rs_multi_framed-ip", "cpp-dart_binary_http-ip", "cpp-dart_compact_http-ip", "cpp-dart_json_http-ip", @@ -69,16 +65,22 @@ "cpp-java_multij-json_http-ip-ssl", "cpp-java_multij_http-ip", "cpp-java_multij_http-ip-ssl", + "cpp-nodejs_binary_http-domain", "cpp-nodejs_binary_http-ip", 
"cpp-nodejs_binary_http-ip-ssl", + "cpp-nodejs_compact_http-domain", "cpp-nodejs_compact_http-ip", "cpp-nodejs_compact_http-ip-ssl", + "cpp-nodejs_json_http-domain", "cpp-nodejs_json_http-ip", "cpp-nodejs_json_http-ip-ssl", + "cpp-nodejs_multi-binary_http-domain", "cpp-nodejs_multi-binary_http-ip", "cpp-nodejs_multi-binary_http-ip-ssl", + "cpp-nodejs_multic-compact_http-domain", "cpp-nodejs_multic-compact_http-ip", "cpp-nodejs_multic-compact_http-ip-ssl", + "cpp-nodejs_multij-json_http-domain", "cpp-nodejs_multij-json_http-ip", "cpp-nodejs_multij-json_http-ip-ssl", "cpp-rs_multi_buffered-ip", @@ -95,6 +97,16 @@ "csharp-erl_binary_framed-ip-ssl", "csharp-erl_compact_buffered-ip-ssl", "csharp-erl_compact_framed-ip-ssl", + "csharp-rb_binary-accel_buffered-ip-ssl", + "csharp-rb_binary-accel_framed-ip-ssl", + "csharp-rb_binary_buffered-ip-ssl", + "csharp-rb_binary_framed-ip-ssl", + "csharp-rb_compact_buffered-ip-ssl", + "csharp-rb_compact_framed-ip-ssl", + "csharp-rb_json_buffered-ip-ssl", + "csharp-rb_json_framed-ip-ssl", + "d-cl_binary_buffered-ip", + "d-cl_binary_framed-ip", "d-cpp_binary_buffered-ip", "d-cpp_binary_buffered-ip-ssl", "d-cpp_binary_framed-ip", @@ -116,11 +128,8 @@ "d-d_binary_http-ip", "d-d_compact_http-ip", "d-d_json_http-ip", - "d-dart_binary_framed-ip", "d-dart_binary_http-ip", - "d-dart_compact_framed-ip", "d-dart_compact_http-ip", - "d-dart_json_framed-ip", "d-dart_json_http-ip", "d-go_binary_http-ip", "d-go_binary_http-ip-ssl", @@ -135,8 +144,6 @@ "d-java_json_http-ip", "d-java_json_http-ip-ssl", "d-js_json_http-ip", - "d-lua_json_buffered-ip", - "d-lua_json_framed-ip", "d-nodejs_binary_buffered-ip", "d-nodejs_binary_buffered-ip-ssl", "d-nodejs_binary_framed-ip", @@ -202,11 +209,17 @@ "erl-nodejs_binary_buffered-ip", "erl-nodejs_compact_buffered-ip", "erl-rb_binary-accel_buffered-ip", + "erl-rb_binary-accel_buffered-ip-ssl", "erl-rb_binary-accel_framed-ip", + "erl-rb_binary-accel_framed-ip-ssl", "erl-rb_binary_buffered-ip", + 
"erl-rb_binary_buffered-ip-ssl", "erl-rb_binary_framed-ip", + "erl-rb_binary_framed-ip-ssl", "erl-rb_compact_buffered-ip", + "erl-rb_compact_buffered-ip-ssl", "erl-rb_compact_framed-ip", + "erl-rb_compact_framed-ip-ssl", "go-cpp_binary_http-ip", "go-cpp_binary_http-ip-ssl", "go-cpp_compact_http-ip", @@ -231,44 +244,32 @@ "go-nodejs_json_framed-ip", "hs-csharp_binary_framed-ip", "hs-csharp_compact_framed-ip", - "hs-dart_binary_framed-ip", - "hs-dart_compact_framed-ip", - "hs-dart_json_framed-ip", "java-d_compact_buffered-ip", "java-d_compact_buffered-ip-ssl", "java-d_compact_framed-ip", + "netcore-csharp_binary_buffered-ip-ssl", + "netcore-csharp_binary_framed-ip-ssl", + "netcore-csharp_compact_buffered-ip-ssl", + "netcore-csharp_compact_framed-ip-ssl", + "netcore-csharp_json_buffered-ip-ssl", + "netcore-csharp_json_framed-ip-ssl", + "nodejs-cpp_binary_http-domain", "nodejs-cpp_binary_http-ip", "nodejs-cpp_binary_http-ip-ssl", + "nodejs-cpp_compact_http-domain", "nodejs-cpp_compact_http-ip", "nodejs-cpp_compact_http-ip-ssl", + "nodejs-cpp_json_http-domain", "nodejs-cpp_json_http-ip", "nodejs-cpp_json_http-ip-ssl", - "nodejs-d_binary_buffered-ip", - "nodejs-d_binary_buffered-ip-ssl", - "nodejs-d_binary_framed-ip", - "nodejs-d_binary_framed-ip-ssl", "nodejs-d_binary_http-ip", "nodejs-d_binary_http-ip-ssl", - "nodejs-d_compact_buffered-ip", - "nodejs-d_compact_buffered-ip-ssl", - "nodejs-d_compact_framed-ip", - "nodejs-d_compact_framed-ip-ssl", "nodejs-d_compact_http-ip", "nodejs-d_compact_http-ip-ssl", - "nodejs-d_json_buffered-ip", - "nodejs-d_json_buffered-ip-ssl", - "nodejs-d_json_framed-ip", - "nodejs-d_json_framed-ip-ssl", "nodejs-d_json_http-ip", "nodejs-d_json_http-ip-ssl", - "nodejs-dart_binary_buffered-ip", - "nodejs-dart_binary_framed-ip", "nodejs-dart_binary_http-ip", - "nodejs-dart_compact_buffered-ip", - "nodejs-dart_compact_framed-ip", "nodejs-dart_compact_http-ip", - "nodejs-dart_json_buffered-ip", - "nodejs-dart_json_framed-ip", 
"nodejs-dart_json_http-ip", "nodejs-go_binary_http-ip", "nodejs-go_binary_http-ip-ssl", @@ -303,8 +304,12 @@ "nodejs-netcore_json_framed-ip-ssl", "perl-rs_multi_buffered-ip", "perl-rs_multi_framed-ip", + "rb-cpp_json_buffered-domain", "rb-cpp_json_buffered-ip", + "rb-cpp_json_buffered-ip-ssl", + "rb-cpp_json_framed-domain", "rb-cpp_json_framed-ip", + "rb-cpp_json_framed-ip-ssl", "rs-cpp_binary_buffered-ip", "rs-cpp_binary_framed-ip", "rs-cpp_compact_buffered-ip", @@ -316,21 +321,5 @@ "rs-cpp_multic-compact_buffered-ip", "rs-cpp_multic-compact_framed-ip", "rs-cpp_multic_buffered-ip", - "rs-cpp_multic_framed-ip", - "rs-csharp_binary_buffered-ip", - "rs-csharp_binary_framed-ip", - "rs-csharp_compact_buffered-ip", - "rs-csharp_compact_framed-ip", - "rs-csharp_multi-binary_buffered-ip", - "rs-csharp_multi-binary_framed-ip", - "rs-csharp_multi_buffered-ip", - "rs-csharp_multi_framed-ip", - "rs-csharp_multic-compact_buffered-ip", - "rs-csharp_multic-compact_framed-ip", - "rs-csharp_multic_buffered-ip", - "rs-csharp_multic_framed-ip", - "rs-dart_binary_framed-ip", - "rs-dart_compact_framed-ip", - "rs-dart_multi-binary_framed-ip", - "rs-dart_multic-compact_framed-ip" + "rs-cpp_multic_framed-ip" ] diff --git a/vendor/git.apache.org/thrift.git/test/netcore/Client/.gitignore b/vendor/git.apache.org/thrift.git/test/netcore/Client/.gitignore new file mode 100644 index 000000000..67d55106a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/netcore/Client/.gitignore @@ -0,0 +1,2 @@ +# ignore for autogenerated files +/ThriftTest diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/ThriftTest.csproj b/vendor/git.apache.org/thrift.git/test/netcore/Client/Client.csproj similarity index 55% rename from vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/ThriftTest.csproj rename to vendor/git.apache.org/thrift.git/test/netcore/Client/Client.csproj index 664710194..f16af390a 100644 --- 
a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/ThriftTest.csproj +++ b/vendor/git.apache.org/thrift.git/test/netcore/Client/Client.csproj @@ -1,9 +1,8 @@ - netcoreapp2.0 - ThriftTest - ThriftTest + Client + Client Exe false false @@ -12,16 +11,21 @@ false false - - - + + + + + + + + diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Program.cs b/vendor/git.apache.org/thrift.git/test/netcore/Client/Program.cs similarity index 82% rename from vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Program.cs rename to vendor/git.apache.org/thrift.git/test/netcore/Client/Program.cs index 94ed9d910..72139d9de 100644 --- a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Program.cs +++ b/vendor/git.apache.org/thrift.git/test/netcore/Client/Program.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -17,9 +17,9 @@ using System; using System.Collections.Generic; -using Test; +using ThriftTest; -namespace ThriftTest +namespace Client { public class Program { @@ -48,8 +48,6 @@ namespace ThriftTest { case "client": return TestClient.Execute(subArgs); - case "server": - return TestServer.Execute(subArgs); case "--help": PrintHelp(); return 0; @@ -62,12 +60,10 @@ namespace ThriftTest private static void PrintHelp() { Console.WriteLine("Usage:"); - Console.WriteLine(" ThriftTest server [options]'"); - Console.WriteLine(" ThriftTest client [options]'"); - Console.WriteLine(" ThriftTest --help"); + Console.WriteLine(" Client client [options]'"); + Console.WriteLine(" Client --help"); Console.WriteLine(""); - TestServer.PrintOptionsHelp(); TestClient.PrintOptionsHelp(); } } diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/AssemblyInfo.cs 
b/vendor/git.apache.org/thrift.git/test/netcore/Client/Properties/AssemblyInfo.cs similarity index 97% rename from vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/AssemblyInfo.cs rename to vendor/git.apache.org/thrift.git/test/netcore/Client/Properties/AssemblyInfo.cs index efc9e3342..157152b4d 100644 --- a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/test/netcore/Client/Properties/AssemblyInfo.cs @@ -22,7 +22,7 @@ using System.Runtime.InteropServices; // set of attributes. Change these attribute values to modify the information // associated with an assembly. -[assembly: AssemblyTitle("ThriftTest")] +[assembly: AssemblyTitle("Client")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("The Apache Software Foundation")] diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/TestClient.cs b/vendor/git.apache.org/thrift.git/test/netcore/Client/TestClient.cs similarity index 92% rename from vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/TestClient.cs rename to vendor/git.apache.org/thrift.git/test/netcore/Client/TestClient.cs index f6cc90095..8be198c69 100644 --- a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/TestClient.cs +++ b/vendor/git.apache.org/thrift.git/test/netcore/Client/TestClient.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -18,28 +18,29 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.IO; using System.Linq; using System.Net; using System.Reflection; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; +using 
System.ServiceModel; using System.Text; using System.Threading; using System.Threading.Tasks; -using ThriftAsync.Test; using Thrift.Collections; using Thrift.Protocols; using Thrift.Transports; using Thrift.Transports.Client; -namespace Test +namespace ThriftTest { public class TestClient { private class TestParams { public int numIterations = 1; - public IPAddress host = IPAddress.Loopback; + public IPAddress host = IPAddress.Any; public int port = 9090; public int numThreads = 1; public string url; @@ -111,11 +112,43 @@ namespace Test } else { - throw new ArgumentException(args[i]); + //throw new ArgumentException(args[i]); } } } + private static X509Certificate2 GetClientCert() + { + var clientCertName = "client.p12"; + var possiblePaths = new List + { + "../../../keys/", + "../../keys/", + "../keys/", + "keys/", + }; + + string existingPath = null; + foreach (var possiblePath in possiblePaths) + { + var path = Path.GetFullPath(possiblePath + clientCertName); + if (File.Exists(path)) + { + existingPath = path; + break; + } + } + + if (string.IsNullOrEmpty(existingPath)) + { + throw new FileNotFoundException($"Cannot find file: {clientCertName}"); + } + + var cert = new X509Certificate2(existingPath, "thrift"); + + return cert; + } + public TClientTransport CreateTransport() { if (url == null) @@ -131,9 +164,16 @@ namespace Test { if (encrypted) { - var certPath = "../../keys/client.p12"; - var cert = new X509Certificate2(certPath, "thrift"); - trans = new TTlsSocketClientTransport(host, port, 0, cert, (o, c, chain, errors) => true, null, SslProtocols.Tls); + var cert = GetClientCert(); + + if (cert == null || !cert.HasPrivateKey) + { + throw new InvalidOperationException("Certificate doesn't contain private key"); + } + + trans = new TTlsSocketClientTransport(host, port, 0, cert, + (sender, certificate, chain, errors) => true, + null, SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12); } else { @@ -184,7 +224,7 @@ namespace Test private class ClientTest 
{ private readonly TClientTransport transport; - private readonly ThriftAsync.Test.ThriftTest.Client client; + private readonly ThriftTest.Client client; private readonly int numIterations; private bool done; @@ -193,7 +233,7 @@ namespace Test public ClientTest(TestParams param) { transport = param.CreateTransport(); - client = new ThriftAsync.Test.ThriftTest.Client(param.CreateProtocol(transport)); + client = new ThriftTest.Client(param.CreateProtocol(transport)); numIterations = param.numIterations; } @@ -224,6 +264,14 @@ namespace Test Console.WriteLine(ex.Message + " ST: " + ex.StackTrace); continue; } + catch (Exception ex) + { + Console.WriteLine("*** FAILED ***"); + Console.WriteLine("Connect failed: " + ex.Message); + ReturnCode |= ErrorUnknown; + Console.WriteLine(ex.Message + " ST: " + ex.StackTrace); + continue; + } try { @@ -285,17 +333,14 @@ namespace Test var tests = Enumerable.Range(0, param.numThreads).Select(_ => new ClientTest(param)).ToArray(); //issue tests on separate threads simultaneously - var threads = tests.Select(test => new Thread(test.Execute)).ToArray(); + var threads = tests.Select(test => new Task(test.Execute)).ToArray(); var start = DateTime.Now; foreach (var t in threads) { t.Start(); } - foreach (var t in threads) - { - t.Join(); - } + Task.WaitAll(threads); Console.WriteLine("Total time: " + (DateTime.Now - start)); Console.WriteLine(); @@ -351,7 +396,7 @@ namespace Test return retval; } - public static async Task ExecuteClientTestAsync(ThriftAsync.Test.ThriftTest.Client client) + public static async Task ExecuteClientTestAsync(ThriftTest.Client client) { var token = CancellationToken.None; var returnCode = 0; diff --git a/vendor/git.apache.org/thrift.git/test/netcore/Makefile.am b/vendor/git.apache.org/thrift.git/test/netcore/Makefile.am index e029a24cb..376ffb71e 100644 --- a/vendor/git.apache.org/thrift.git/test/netcore/Makefile.am +++ b/vendor/git.apache.org/thrift.git/test/netcore/Makefile.am @@ -19,41 +19,23 @@ SUBDIRS = . 
-THRIFT = $(top_builddir)/compiler/cpp/thrift - -GENDIR = ThriftTest/gen-netcore - -THRIFTCODE = \ - ThriftTest/TestClient.cs \ - ThriftTest/TestServer.cs \ - ThriftTest/Properties/AssemblyInfo.cs \ - ThriftTest/Program.cs - -all-local: \ - ThriftTest/stage/ThriftTest.dll - -ThriftTest/stage/ThriftTest.dll: $(THRIFTCODE) - $(MKDIR_P) $(GENDIR) - $(THRIFT) -gen netcore:wcf -r -out $(GENDIR) $(top_srcdir)/test/ThriftTest.thrift - $(DOTNETCORE) --info - $(DOTNETCORE) restore +all-local: $(DOTNETCORE) build -precross: \ - ThriftTest/stage/ThriftTest.dll +precross: + $(DOTNETCORE) build clean-local: - $(RM) ThriftTest.exe - $(RM) -r $(GENDIR) - $(RM) -r ThriftTest/bin - $(RM) -r ThriftTest/obj + $(RM) -r Client/bin + $(RM) -r Server/bin + $(RM) -r Client/obj + $(RM) -r Server/obj + $(RM) -r ThriftTest/ThriftTest EXTRA_DIST = \ - $(THRIFTCODE) \ - ThriftTest.sln \ - ThriftTest/ThriftTest.csproj \ - ThriftTest/Properties/launchSettings.json \ - build.cmd \ - build.sh \ - README.md - + Client \ + README.md \ + Server \ + ThriftTest.sln \ + build.cmd \ + build.sh diff --git a/vendor/git.apache.org/thrift.git/test/netcore/README.md b/vendor/git.apache.org/thrift.git/test/netcore/README.md index 05eb0e212..ed728d1ba 100644 --- a/vendor/git.apache.org/thrift.git/test/netcore/README.md +++ b/vendor/git.apache.org/thrift.git/test/netcore/README.md @@ -9,7 +9,10 @@ Tests for Thrift client library ported to Microsoft .Net Core - NET Core Standard 1.6 (SDK 2.0.0) # How to build on Windows +- Get Thrift IDL compiler executable, add to some folder and add path to this folder into PATH variable - Open ThriftTest.sln in Visual Studio and build +or +- Build with scripts # How to build on Unix - Ensure you have .NET Core 2.0.0 SDK installed or use the Ubuntu Xenial docker image diff --git a/vendor/git.apache.org/thrift.git/test/netcore/Server/.gitignore b/vendor/git.apache.org/thrift.git/test/netcore/Server/.gitignore new file mode 100644 index 000000000..67d55106a --- /dev/null +++ 
b/vendor/git.apache.org/thrift.git/test/netcore/Server/.gitignore @@ -0,0 +1,2 @@ +# ignore for autogenerated files +/ThriftTest diff --git a/vendor/git.apache.org/thrift.git/test/netcore/Server/Program.cs b/vendor/git.apache.org/thrift.git/test/netcore/Server/Program.cs new file mode 100644 index 000000000..e647e5b2a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/netcore/Server/Program.cs @@ -0,0 +1,72 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +using System; +using System.Collections.Generic; +using ThriftTest; + +namespace Server +{ + public class Program + { + public static int Main(string[] args) + { + try + { + Console.SetBufferSize(Console.BufferWidth, 4096); + } + catch (Exception) + { + Console.WriteLine("Failed to grow scroll-back buffer"); + } + + // split mode and options + var subArgs = new List(args); + var firstArg = string.Empty; + if (subArgs.Count > 0) + { + firstArg = subArgs[0]; + subArgs.RemoveAt(0); + } + + // run whatever mode is choosen + switch(firstArg) + { + case "server": + return TestServer.Execute(subArgs); + case "--help": + PrintHelp(); + return 0; + default: + PrintHelp(); + return -1; + } + } + + private static void PrintHelp() + { + Console.WriteLine("Usage:"); + Console.WriteLine(" Server server [options]'"); + Console.WriteLine(" Server --help"); + Console.WriteLine(""); + + TestServer.PrintOptionsHelp(); + } + } +} + + diff --git a/vendor/git.apache.org/thrift.git/test/netcore/Server/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/test/netcore/Server/Properties/AssemblyInfo.cs new file mode 100644 index 000000000..265495c05 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/netcore/Server/Properties/AssemblyInfo.cs @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation(ASF) under one +// or more contributor license agreements.See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership.The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. + +[assembly: AssemblyTitle("Server")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("The Apache Software Foundation")] +[assembly: AssemblyProduct("Thrift")] +[assembly: AssemblyCopyright("The Apache Software Foundation")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. + +[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM + +[assembly: Guid("B0C13DA0-3117-4844-8AE8-B1775E46223D")] + diff --git a/vendor/git.apache.org/thrift.git/test/netcore/Server/Server.csproj b/vendor/git.apache.org/thrift.git/test/netcore/Server/Server.csproj new file mode 100644 index 000000000..2f9b4b1f5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/netcore/Server/Server.csproj @@ -0,0 +1,31 @@ + + + netcoreapp2.0 + Server + Server + Exe + false + false + false + false + false + false + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/TestServer.cs b/vendor/git.apache.org/thrift.git/test/netcore/Server/TestServer.cs similarity index 89% rename from vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/TestServer.cs rename to vendor/git.apache.org/thrift.git/test/netcore/Server/TestServer.cs index aa25c9102..bfd33357f 100644 --- 
a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/TestServer.cs +++ b/vendor/git.apache.org/thrift.git/test/netcore/Server/TestServer.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -17,13 +17,14 @@ using System; using System.Collections.Generic; +using System.IO; +using System.Linq; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; -using ThriftAsync.Test; using Thrift; using Thrift.Collections; using Thrift.Protocols; @@ -31,7 +32,7 @@ using Thrift.Server; using Thrift.Transports; using Thrift.Transports.Server; -namespace Test +namespace ThriftTest { internal class ServerParam { @@ -93,7 +94,7 @@ namespace Test } else { - throw new ArgumentException(args[i]); + //throw new ArgumentException(args[i]); } } @@ -132,9 +133,9 @@ namespace Test callCount++; return Task.CompletedTask; } - }; + } - public class TestHandlerAsync : ThriftAsync.Test.ThriftTest.IAsync + public class TestHandlerAsync : ThriftTest.IAsync { public TBaseServer server { get; set; } private int handlerID; @@ -439,20 +440,13 @@ namespace Test public Task testOnewayAsync(int secondsToSleep, CancellationToken cancellationToken) { logger.Invoke("testOneway({0}), sleeping...", secondsToSleep); - Thread.Sleep(secondsToSleep * 1000); + Task.Delay(secondsToSleep * 1000, cancellationToken).GetAwaiter().GetResult(); logger.Invoke("testOneway finished"); return Task.CompletedTask; } } - - private enum ProcessorFactoryType - { - TSingletonProcessorFactory, - TPrototypeProcessorFactory, - } - internal static void PrintOptionsHelp() { Console.WriteLine("Server options:"); @@ -466,8 
+460,41 @@ namespace Test Console.WriteLine(); } + private static X509Certificate2 GetServerCert() + { + var serverCertName = "server.p12"; + var possiblePaths = new List + { + "../../../keys/", + "../../keys/", + "../keys/", + "keys/", + }; + + string existingPath = null; + foreach (var possiblePath in possiblePaths) + { + var path = Path.GetFullPath(possiblePath + serverCertName); + if (File.Exists(path)) + { + existingPath = path; + break; + } + } + + if (string.IsNullOrEmpty(existingPath)) + { + throw new FileNotFoundException($"Cannot find file: {serverCertName}"); + } + + var cert = new X509Certificate2(existingPath, "thrift"); + + return cert; + } + public static int Execute(List args) { + var loggerFactory = new LoggerFactory();//.AddConsole().AddDebug(); var logger = new LoggerFactory().CreateLogger("Test"); try @@ -493,16 +520,28 @@ namespace Test { trans = new TNamedPipeServerTransport(param.pipe); } +// else if (param.useFramed) +// { +// trans = new TServerFramedTransport(param.port); +// } else { if (param.useEncryption) { - var certPath = "../../keys/server.p12"; - trans = new TTlsServerSocketTransport(param.port, param.useBufferedSockets, new X509Certificate2(certPath, "thrift"), null, null, SslProtocols.Tls); + var cert = GetServerCert(); + + if (cert == null || !cert.HasPrivateKey) + { + throw new InvalidOperationException("Certificate doesn't contain private key"); + } + + trans = new TTlsServerSocketTransport(param.port, param.useBufferedSockets, param.useFramed, cert, + (sender, certificate, chain, errors) => true, + null, SslProtocols.Tls | SslProtocols.Tls11 | SslProtocols.Tls12); } else { - trans = new TServerSocketTransport(param.port, 0, param.useBufferedSockets); + trans = new TServerSocketTransport(param.port, 0, param.useBufferedSockets, param.useFramed); } } @@ -518,15 +557,10 @@ namespace Test // Processor var testHandler = new TestHandlerAsync(); - var testProcessor = new ThriftAsync.Test.ThriftTest.AsyncProcessor(testHandler); + var 
testProcessor = new ThriftTest.AsyncProcessor(testHandler); processorFactory = new SingletonTProcessorFactory(testProcessor); - - TTransportFactory transFactory; - if (param.useFramed) - throw new NotImplementedException("framed"); // transFactory = new TFramedTransport.Factory(); - else - transFactory = new TTransportFactory(); + TTransportFactory transFactory = new TTransportFactory(); TBaseServer serverEngine = new AsyncBaseServer(processorFactory, trans, transFactory, transFactory, proto, proto, logger); diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest.sln b/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest.sln index c3f194bec..2ab241add 100644 --- a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest.sln +++ b/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest.sln @@ -1,11 +1,13 @@ -Microsoft Visual Studio Solution File, Format Version 12.00 +Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 VisualStudioVersion = 15.0.26730.12 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ThriftTest", "ThriftTest\ThriftTest.csproj", "{DDED46FF-F359-47B4-BA7E-9B70F216BD44}" -EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift", "..\..\lib\netcore\Thrift\Thrift.csproj", "{C20EA2A9-7660-47DE-9A49-D1EF12FB2895}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Client", "Client\Client.csproj", "{21039F25-6ED7-4E80-A545-EBC93472EBD1}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Server", "Server\Server.csproj", "{0C6E8685-F191-4479-9842-882A38961127}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -16,18 +18,6 @@ Global Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - 
{DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Debug|Any CPU.Build.0 = Debug|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Debug|x64.ActiveCfg = Debug|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Debug|x64.Build.0 = Debug|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Debug|x86.ActiveCfg = Debug|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Debug|x86.Build.0 = Debug|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Release|Any CPU.ActiveCfg = Release|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Release|Any CPU.Build.0 = Release|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Release|x64.ActiveCfg = Release|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Release|x64.Build.0 = Release|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Release|x86.ActiveCfg = Release|Any CPU - {DDED46FF-F359-47B4-BA7E-9B70F216BD44}.Release|x86.Build.0 = Release|Any CPU {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|Any CPU.Build.0 = Debug|Any CPU {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|x64.ActiveCfg = Debug|Any CPU @@ -40,6 +30,30 @@ Global {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x64.Build.0 = Release|Any CPU {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x86.ActiveCfg = Release|Any CPU {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x86.Build.0 = Release|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Debug|Any CPU.Build.0 = Debug|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Debug|x64.ActiveCfg = Debug|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Debug|x64.Build.0 = Debug|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Debug|x86.ActiveCfg = Debug|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Debug|x86.Build.0 = Debug|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Release|Any CPU.ActiveCfg = Release|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Release|Any CPU.Build.0 = Release|Any CPU + 
{21039F25-6ED7-4E80-A545-EBC93472EBD1}.Release|x64.ActiveCfg = Release|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Release|x64.Build.0 = Release|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Release|x86.ActiveCfg = Release|Any CPU + {21039F25-6ED7-4E80-A545-EBC93472EBD1}.Release|x86.Build.0 = Release|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Debug|x64.ActiveCfg = Debug|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Debug|x64.Build.0 = Debug|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Debug|x86.ActiveCfg = Debug|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Debug|x86.Build.0 = Debug|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Release|Any CPU.Build.0 = Release|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Release|x64.ActiveCfg = Release|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Release|x64.Build.0 = Release|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Release|x86.ActiveCfg = Release|Any CPU + {0C6E8685-F191-4479-9842-882A38961127}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/launchSettings.json b/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/launchSettings.json deleted file mode 100644 index ddafa79a4..000000000 --- a/vendor/git.apache.org/thrift.git/test/netcore/ThriftTest/Properties/launchSettings.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "profiles": { - "ThriftTest": { - "commandName": "Project" - } - } -} \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/test/netcore/build.cmd b/vendor/git.apache.org/thrift.git/test/netcore/build.cmd index e971799d8..9b84ef276 100644 --- 
a/vendor/git.apache.org/thrift.git/test/netcore/build.cmd +++ b/vendor/git.apache.org/thrift.git/test/netcore/build.cmd @@ -19,12 +19,7 @@ rem * under the License. rem */ setlocal -cd Interfaces -thrift -gen netcore:wcf -r ..\..\tutorial.thrift -cd .. - dotnet --info -dotnet restore dotnet build :eof diff --git a/vendor/git.apache.org/thrift.git/test/netcore/build.sh b/vendor/git.apache.org/thrift.git/test/netcore/build.sh index 626635cfb..c97e310f0 100755 --- a/vendor/git.apache.org/thrift.git/test/netcore/build.sh +++ b/vendor/git.apache.org/thrift.git/test/netcore/build.sh @@ -22,10 +22,5 @@ #exit if any command fails set -e -cd ThriftTest -../../../compiler/cpp/thrift -gen netcore:wcf -r ../../ThriftTest.thrift -cd .. - dotnet --info -dotnet restore dotnet build diff --git a/vendor/git.apache.org/thrift.git/test/perl/TestServer.pl b/vendor/git.apache.org/thrift.git/test/perl/TestServer.pl index 7d8f929b4..e8c1cfa9e 100644 --- a/vendor/git.apache.org/thrift.git/test/perl/TestServer.pl +++ b/vendor/git.apache.org/thrift.git/test/perl/TestServer.pl @@ -26,6 +26,8 @@ use Data::Dumper; use Getopt::Long qw(GetOptions); use Time::HiRes qw(gettimeofday); +$SIG{INT} = \&sigint_handler; + use lib '../../lib/perl/lib'; use lib 'gen-perl'; @@ -146,6 +148,12 @@ if ($opts{"domain-socket"}) { my $server = new Thrift::SimpleServer($processor, $serversocket, $transport, $protocol); print "Starting \"simple\" server ($opts{transport}/$opts{protocol}) listen on: $listening_on\n"; $server->serve(); +print "done.\n"; + +sub sigint_handler { + print "received SIGINT, stopping...\n"; + $server->stop(); +} ### ### Test server implementation diff --git a/vendor/git.apache.org/thrift.git/test/php/Makefile.am b/vendor/git.apache.org/thrift.git/test/php/Makefile.am index 28357f6be..72f7fc581 100755 --- a/vendor/git.apache.org/thrift.git/test/php/Makefile.am +++ b/vendor/git.apache.org/thrift.git/test/php/Makefile.am @@ -20,8 +20,8 @@ stubs: ../ThriftTest.thrift $(THRIFT) --gen php 
../ThriftTest.thrift $(THRIFT) --gen php:inlined ../ThriftTest.thrift - $(MKDIR_P) gen-php-psr4 - $(THRIFT) -out gen-php-psr4 --gen php:psr4 ../ThriftTest.thrift + $(MKDIR_P) gen-php-classmap + $(THRIFT) -out gen-php-classmap --gen php ../ThriftTest.thrift php_ext_dir: mkdir -p php_ext_dir @@ -33,7 +33,7 @@ precross: stubs php_ext_dir check: stubs php_ext_dir clean-local: - $(RM) -r gen-php gen-phpi gen-php-psr4 php_ext_dir + $(RM) -r gen-php gen-phpi gen-php-classmap php_ext_dir client: stubs php_ext_dir php TestClient.php diff --git a/vendor/git.apache.org/thrift.git/test/php/TestPsr4.php b/vendor/git.apache.org/thrift.git/test/php/TestClassmap.php similarity index 96% rename from vendor/git.apache.org/thrift.git/test/php/TestPsr4.php rename to vendor/git.apache.org/thrift.git/test/php/TestClassmap.php index d30bf1c49..6fd159437 100644 --- a/vendor/git.apache.org/thrift.git/test/php/TestPsr4.php +++ b/vendor/git.apache.org/thrift.git/test/php/TestClassmap.php @@ -18,6 +18,6 @@ */ diff --git a/vendor/git.apache.org/thrift.git/test/php/TestClient.php b/vendor/git.apache.org/thrift.git/test/php/TestClient.php index 1591027f4..acd901d88 100755 --- a/vendor/git.apache.org/thrift.git/test/php/TestClient.php +++ b/vendor/git.apache.org/thrift.git/test/php/TestClient.php @@ -2,7 +2,8 @@ namespace test\php; -require_once __DIR__.'/../../lib/php/lib/Thrift/ClassLoader/ThriftClassLoader.php'; +/** @var \Composer\Autoload\ClassLoader $loader */ +$loader = require __DIR__ . '/../../vendor/autoload.php'; use Thrift\ClassLoader\ThriftClassLoader; @@ -13,14 +14,14 @@ if (!isset($MODE)) { $MODE = 'normal'; } -$loader = new ThriftClassLoader(); -$loader->registerNamespace('Thrift', __DIR__ . 
'/../../lib/php/lib'); -if ($GEN_DIR === 'gen-php-psr4') { - $loader->registerNamespace('ThriftTest', $GEN_DIR); + +if ($GEN_DIR == 'gen-php') { + $loader->addPsr4('', $GEN_DIR); } else { + $loader = new ThriftClassLoader(); $loader->registerDefinition('ThriftTest', $GEN_DIR); + $loader->register(); } -$loader->register(); /* * Licensed to the Apache Software Foundation (ASF) under one @@ -94,7 +95,7 @@ foreach ($argv as $arg) { $MODE = substr($arg, 12); } else if (substr($arg, 0, 11) == '--protocol=') { $PROTO = substr($arg, 11); - } + } } $hosts = array('localhost'); diff --git a/vendor/git.apache.org/thrift.git/test/py/Makefile.am b/vendor/git.apache.org/thrift.git/test/py/Makefile.am index b3513dd7e..829620055 100644 --- a/vendor/git.apache.org/thrift.git/test/py/Makefile.am +++ b/vendor/git.apache.org/thrift.git/test/py/Makefile.am @@ -23,24 +23,31 @@ py_unit_tests = RunClientServer.py thrift_gen = \ gen-py/ThriftTest/__init__.py \ gen-py/DebugProtoTest/__init__.py \ + gen-py/DoubleConstantsTest/__init__.py \ gen-py/Recursive/__init__.py \ gen-py-default/ThriftTest/__init__.py \ gen-py-default/DebugProtoTest/__init__.py \ + gen-py-default/DoubleConstantsTest/__init__.py \ gen-py-default/Recursive/__init__.py \ gen-py-slots/ThriftTest/__init__.py \ gen-py-slots/DebugProtoTest/__init__.py \ + gen-py-slots/DoubleConstantsTest/__init__.py \ gen-py-slots/Recursive/__init__.py \ gen-py-oldstyle/ThriftTest/__init__.py \ gen-py-oldstyle/DebugProtoTest/__init__.py \ + gen-py-oldstyle/DoubleConstantsTest/__init__.py \ gen-py-oldstyle/Recursive/__init__.py \ gen-py-no_utf8strings/ThriftTest/__init__.py \ gen-py-no_utf8strings/DebugProtoTest/__init__.py \ + gen-py-no_utf8strings/DoubleConstantsTest/__init__.py \ gen-py-no_utf8strings/Recursive/__init__.py \ gen-py-dynamic/ThriftTest/__init__.py \ gen-py-dynamic/DebugProtoTest/__init__.py \ + gen-py-dynamic/DoubleConstantsTest/__init__.py \ gen-py-dynamic/Recursive/__init__.py \ gen-py-dynamicslots/ThriftTest/__init__.py \ 
gen-py-dynamicslots/DebugProtoTest/__init__.py \ + gen-py-dynamicslots/DoubleConstantsTest/__init__.py \ gen-py-dynamicslots/Recursive/__init__.py diff --git a/vendor/git.apache.org/thrift.git/test/py/RunClientServer.py b/vendor/git.apache.org/thrift.git/test/py/RunClientServer.py index 7c0f78739..b213d1acc 100755 --- a/vendor/git.apache.org/thrift.git/test/py/RunClientServer.py +++ b/vendor/git.apache.org/thrift.git/test/py/RunClientServer.py @@ -38,6 +38,7 @@ SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) SCRIPTS = [ 'FastbinaryTest.py', 'TestFrozen.py', + 'TestRenderedDoubleConstants.py', 'TSimpleJSONProtocolTest.py', 'SerializationTest.py', 'TestEof.py', diff --git a/vendor/git.apache.org/thrift.git/test/py/TestRenderedDoubleConstants.py b/vendor/git.apache.org/thrift.git/test/py/TestRenderedDoubleConstants.py new file mode 100644 index 000000000..20903d81b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/test/py/TestRenderedDoubleConstants.py @@ -0,0 +1,177 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import unittest + +from DoubleConstantsTest import constants + +# +# In order to run the test under Windows. 
We need to create symbolic link +# name 'thrift' to '../src' folder by using: +# +# mklink /D thrift ..\src +# + + +class TestRenderedDoubleConstants(unittest.TestCase): + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST = \ + "failed to verify a double constant generated by Thrift (expected = %f, got = %f)" + ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST =\ + "failed to verify a list item by Thrift (expected = %f, got = %f)" + ASSERTION_MESSAGE_FOR_TYPE_CHECKS = "the rendered variable with name %s is not of double type" + + # to make sure the variables inside Thrift files are generated correctly + def test_rendered_double_constants(self): + EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0 + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0 + EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0 + EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0 + EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359 + EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1 + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1 + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308 + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43 + EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308 + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43 + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT, + places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + 
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT, + constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT, + places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT, + constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT, + places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT, + constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, + constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE, + places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE, + constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, + constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, 
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, + constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, + constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, + constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST)) + self.assertAlmostEqual( + constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, places=7, + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % ( + EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, + constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST)) + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST") + self.assertTrue( + 
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST") + self.assertTrue( + isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, float), + msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS % + "DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST") + + # to make sure the variables inside Thrift files are generated correctly + def test_rendered_double_list(self): + EXPECTED_DOUBLE_LIST = [1.0, -100.0, 100.0, 9223372036854775807.0, -9223372036854775807.0, 3.14159265359, + 1000000.1, -1000000.1, 1.7e+308, 
-1.7e+308, 9223372036854775816.43, + -9223372036854775816.43] + self.assertEqual(len(constants.DOUBLE_LIST_TEST), len(EXPECTED_DOUBLE_LIST)) + for i, expectedValue in enumerate(EXPECTED_DOUBLE_LIST): + self.assertAlmostEqual(constants.DOUBLE_LIST_TEST[i], expectedValue, places=7) + + +def suite(): + suite = unittest.TestSuite() + loader = unittest.TestLoader() + suite.addTest(loader.loadTestsFromTestCase(TestRenderedDoubleConstants)) + return suite + + +if __name__ == "__main__": + unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2)) diff --git a/vendor/git.apache.org/thrift.git/test/py/generate.cmake b/vendor/git.apache.org/thrift.git/test/py/generate.cmake index 46263c84a..4ed14cc52 100644 --- a/vendor/git.apache.org/thrift.git/test/py/generate.cmake +++ b/vendor/git.apache.org/thrift.git/test/py/generate.cmake @@ -21,6 +21,13 @@ generate(${MY_PROJECT_DIR}/test/DebugProtoTest.thrift py:no_utf8strings gen-py-n generate(${MY_PROJECT_DIR}/test/DebugProtoTest.thrift py:dynamic gen-py-dynamic) generate(${MY_PROJECT_DIR}/test/DebugProtoTest.thrift py:dynamic,slots gen-py-dynamicslots) +generate(${MY_PROJECT_DIR}/test/DoubleConstantsTest.thrift py gen-py-default) +generate(${MY_PROJECT_DIR}/test/DoubleConstantsTest.thrift py:slots gen-py-slots) +generate(${MY_PROJECT_DIR}/test/DoubleConstantsTest.thrift py:old_style gen-py-oldstyle) +generate(${MY_PROJECT_DIR}/test/DoubleConstantsTest.thrift py:no_utf8strings gen-py-no_utf8strings) +generate(${MY_PROJECT_DIR}/test/DoubleConstantsTest.thrift py:dynamic gen-py-dynamic) +generate(${MY_PROJECT_DIR}/test/DoubleConstantsTest.thrift py:dynamic,slots gen-py-dynamicslots) + generate(${MY_PROJECT_DIR}/test/Recursive.thrift py gen-py-default) generate(${MY_PROJECT_DIR}/test/Recursive.thrift py:slots gen-py-slots) generate(${MY_PROJECT_DIR}/test/Recursive.thrift py:old_style gen-py-oldstyle) diff --git a/vendor/git.apache.org/thrift.git/test/rb/Gemfile 
b/vendor/git.apache.org/thrift.git/test/rb/Gemfile index 58c04aab0..4ff0539fd 100644 --- a/vendor/git.apache.org/thrift.git/test/rb/Gemfile +++ b/vendor/git.apache.org/thrift.git/test/rb/Gemfile @@ -2,6 +2,6 @@ source "http://rubygems.org" require "rubygems" -gem "rack", "~> 1.5.2" -gem "thin", "~> 1.5.0" -gem "test-unit" +gem 'rack', '~> 2.0', '>= 2.0.4' +gem 'thin', '~> 1.7', '>= 1.7.2' +gem 'test-unit', '~> 3.2', '>= 3.2.7' diff --git a/vendor/git.apache.org/thrift.git/test/rb/integration/TestClient.rb b/vendor/git.apache.org/thrift.git/test/rb/integration/TestClient.rb index beebe44e5..639aca99e 100755 --- a/vendor/git.apache.org/thrift.git/test/rb/integration/TestClient.rb +++ b/vendor/git.apache.org/thrift.git/test/rb/integration/TestClient.rb @@ -26,35 +26,63 @@ require 'test_helper' require 'thrift' require 'thrift_test' -$protocolType = "binary" +$domain_socket = nil $host = "localhost" $port = 9090 +$protocolType = "binary" +$ssl = false $transport = "buffered" + ARGV.each do|a| if a == "--help" puts "Allowed options:" puts "\t -h [ --help ] \t produce help message" - puts "\t--host arg (=localhost) \t Host to connect" - puts "\t--port arg (=9090) \t Port number to listen" - puts "\t--protocol arg (=binary) \t protocol: binary, accel" + puts "\t--domain-socket arg (=) \t Unix domain socket path" + puts "\t--host arg (=localhost) \t Host to connect \t not valid with domain-socket" + puts "\t--port arg (=9090) \t Port number to listen \t not valid with domain-socket" + puts "\t--protocol arg (=binary) \t protocol: accel, binary, compact, json" + puts "\t--ssl \t use ssl \t not valid with domain-socket" puts "\t--transport arg (=buffered) transport: buffered, framed, http" exit + elsif a.start_with?("--domain-socket") + $domain_socket = a.split("=")[1] elsif a.start_with?("--host") $host = a.split("=")[1] elsif a.start_with?("--protocol") $protocolType = a.split("=")[1] + elsif a == "--ssl" + $ssl = true elsif a.start_with?("--transport") $transport = 
a.split("=")[1] elsif a.start_with?("--port") $port = a.split("=")[1].to_i end end -ARGV=[] class SimpleClientTest < Test::Unit::TestCase def setup unless @socket - @socket = Thrift::Socket.new($host, $port) + if $domain_socket.to_s.strip.empty? + if $ssl + # the working directory for ruby crosstest is test/rb/gen-rb + keysDir = File.join(File.dirname(File.dirname(Dir.pwd)), "keys") + ctx = OpenSSL::SSL::SSLContext.new + ctx.ca_file = File.join(keysDir, "CA.pem") + ctx.cert = OpenSSL::X509::Certificate.new(File.open(File.join(keysDir, "client.crt"))) + ctx.cert_store = OpenSSL::X509::Store.new + ctx.cert_store.add_file(File.join(keysDir, 'server.pem')) + ctx.key = OpenSSL::PKey::RSA.new(File.open(File.join(keysDir, "client.key"))) + ctx.options = OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3 + ctx.ssl_version = :SSLv23 + ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + @socket = Thrift::SSLSocket.new($host, $port, nil, ctx) + else + @socket = Thrift::Socket.new($host, $port) + end + else + @socket = Thrift::UNIXSocket.new($domain_socket) + end + if $transport == "buffered" transportFactory = Thrift::BufferedTransport.new(@socket) elsif $transport == "framed" @@ -74,7 +102,7 @@ class SimpleClientTest < Test::Unit::TestCase else raise 'Unknown protocol type' end - @client = Thrift::Test::ThriftTest::Client.new(@protocol) + @client = Thrift::Test::ThriftTest::Client.new(@protocol) @socket.open end end diff --git a/vendor/git.apache.org/thrift.git/test/rb/integration/TestServer.rb b/vendor/git.apache.org/thrift.git/test/rb/integration/TestServer.rb index bab723a05..7caf6a8cb 100755 --- a/vendor/git.apache.org/thrift.git/test/rb/integration/TestServer.rb +++ b/vendor/git.apache.org/thrift.git/test/rb/integration/TestServer.rb @@ -106,21 +106,30 @@ class SimpleHandler end -protocol = "binary" +domain_socket = nil port = 9090 +protocol = "binary" +@protocolFactory = nil +ssl = false transport = "buffered" -@transportFactory = Thrift::BufferedTransportFactory.new 
-@protocolFactory = Thrift::BinaryProtocolFactory.new +@transportFactory = nil + ARGV.each do|a| if a == "--help" puts "Allowed options:" puts "\t -h [ --help ] \t produce help message" - puts "\t--port arg (=9090) \t Port number to listen" - puts "\t--protocol arg (=binary) \t protocol: binary, accel" + puts "\t--domain-socket arg (=) \t Unix domain socket path" + puts "\t--port arg (=9090) \t Port number to listen \t not valid with domain-socket" + puts "\t--protocol arg (=binary) \t protocol: accel, binary, compact, json" + puts "\t--ssl \t use ssl \t not valid with domain-socket" puts "\t--transport arg (=buffered) transport: buffered, framed, http" exit + elsif a.start_with?("--domain-socket") + domain_socket = a.split("=")[1] elsif a.start_with?("--protocol") protocol = a.split("=")[1] + elsif a == "--ssl" + ssl = true elsif a.start_with?("--transport") transport = a.split("=")[1] elsif a.start_with?("--port") @@ -128,9 +137,7 @@ ARGV.each do|a| end end -if protocol == "binary" - @protocolFactory = Thrift::BinaryProtocolFactory.new -elsif protocol == "" +if protocol == "binary" || protocol.to_s.strip.empty? @protocolFactory = Thrift::BinaryProtocolFactory.new elsif protocol == "compact" @protocolFactory = Thrift::CompactProtocolFactory.new @@ -142,9 +149,7 @@ else raise 'Unknown protocol type' end -if transport == "buffered" - @transportFactory = Thrift::BufferedTransportFactory.new -elsif transport == "" +if transport == "buffered" || transport.to_s.strip.empty? 
@transportFactory = Thrift::BufferedTransportFactory.new elsif transport == "framed" @transportFactory = Thrift::FramedTransportFactory.new @@ -152,8 +157,32 @@ else raise 'Unknown transport type' end -@handler = SimpleHandler.new +@handler = SimpleHandler.new @processor = Thrift::Test::ThriftTest::Processor.new(@handler) -@transport = Thrift::ServerSocket.new(port) -@server = Thrift::ThreadedServer.new(@processor, @transport, @transportFactory, @protocolFactory) +@transport = nil +if domain_socket.to_s.strip.empty? + if ssl + # the working directory for ruby crosstest is test/rb/gen-rb + keysDir = File.join(File.dirname(File.dirname(Dir.pwd)), "keys") + ctx = OpenSSL::SSL::SSLContext.new + ctx.ca_file = File.join(keysDir, "CA.pem") + ctx.cert = OpenSSL::X509::Certificate.new(File.open(File.join(keysDir, "server.crt"))) + ctx.cert_store = OpenSSL::X509::Store.new + ctx.cert_store.add_file(File.join(keysDir, 'client.pem')) + ctx.key = OpenSSL::PKey::RSA.new(File.open(File.join(keysDir, "server.key"))) + ctx.options = OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3 + ctx.ssl_version = :SSLv23 + ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + @transport = Thrift::SSLServerSocket.new(nil, port, ctx) + else + @transport = Thrift::ServerSocket.new(port) + end +else + @transport = Thrift::UNIXServerSocket.new(domain_socket) +end + +@server = Thrift::ThreadedServer.new(@processor, @transport, @transportFactory, @protocolFactory) + +puts "Starting TestServer #{@server.to_s}" @server.serve +puts "done." diff --git a/vendor/git.apache.org/thrift.git/test/test.py b/vendor/git.apache.org/thrift.git/test/test.py index 5a015eac7..24e7c4e47 100755 --- a/vendor/git.apache.org/thrift.git/test/test.py +++ b/vendor/git.apache.org/thrift.git/test/test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -18,12 +18,13 @@ # under the License. 
# -# Apache Thrift - integration test suite +# +# Apache Thrift - integration (cross) test suite # # tests different server-client, protocol and transport combinations # -# This script supports python 2.7 and later. -# python 3.x is recommended for better stability. +# This script requires python 3.x due to the improvements in +# subprocess management that are needed for reliability. # from __future__ import print_function @@ -38,6 +39,12 @@ import sys import crossrunner from crossrunner.compat import path_join +# 3.3 introduced subprocess timeouts on waiting for child +req_version = (3, 3) +cur_version = sys.version_info +assert (cur_version >= req_version), "Python 3.3 or later is required for proper operation." + + ROOT_DIR = os.path.dirname(os.path.realpath(os.path.dirname(__file__))) TEST_DIR_RELATIVE = 'test' TEST_DIR = path_join(ROOT_DIR, TEST_DIR_RELATIVE) @@ -161,9 +168,11 @@ def main(argv): options.update_failures, options.print_failures) elif options.features is not None: features = options.features or ['.*'] - res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures, options.retry_count, options.regex) + res = run_feature_tests(server_match, features, options.jobs, + options.skip_known_failures, options.retry_count, options.regex) else: - res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures, options.retry_count, options.regex) + res = run_cross_tests(server_match, client_match, options.jobs, + options.skip_known_failures, options.retry_count, options.regex) return 0 if res else 1 diff --git a/vendor/git.apache.org/thrift.git/test/tests.json b/vendor/git.apache.org/thrift.git/test/tests.json index c4e07eefb..4641f223b 100644 --- a/vendor/git.apache.org/thrift.git/test/tests.json +++ b/vendor/git.apache.org/thrift.git/test/tests.json @@ -42,11 +42,29 @@ ], "workdir": "c_glib" }, + { + "name": "cl", + "server": { + "command": ["TestServer"], + "workdir": "cl", + "protocols": ["binary", 
"multi"], + "transports": ["buffered", "framed"], + "sockets": ["ip"] + }, + "client": { + "command": ["TestClient"], + "workdir": "cl", + "protocols": ["binary", "multi"], + "transports": ["buffered", "framed"], + "sockets": ["ip"] + } + }, { "name": "d", "server": { "command": [ - "thrift_test_server" + "thrift_test_server", + "--trace" ] }, "client": { @@ -102,23 +120,12 @@ }, { "name": "java", - "join_args": true, - "command": [ - "ant", - "-f", - "build.xml", - "-Dno-gen-thrift=\"\"", - "-Dtestargs" - ], - "prepare": [ - "ant", - "-f", - "build.xml", - "compile-test" - ], + "join_args": false, "server": { - "delay": 10, - "extra_args": ["run-testserver"], + "delay": 15, + "command": [ + "build/runserver" + ], "protocols": [ "binary:multi", "compact:multic", @@ -127,7 +134,9 @@ }, "client": { "timeout": 13, - "extra_args": ["run-testclient"], + "command": [ + "build/runclient" + ], "transports": [ "http" ], @@ -183,7 +192,8 @@ ], "sockets": [ "ip", - "ip-ssl" + "ip-ssl", + "domain" ], "protocols": [ "compact", @@ -356,7 +366,8 @@ "timeout": 5, "command": [ "ruby", - "../integration/TestClient.rb" + "../integration/TestClient.rb", + "--" ] }, "transports": [ @@ -364,13 +375,15 @@ "framed" ], "sockets": [ - "ip" + "domain", + "ip", + "ip-ssl" ], "protocols": [ - "compact", "binary", - "json", - "binary:accel" + "binary:accel", + "compact", + "json" ], "workdir": "rb/gen-rb" }, @@ -430,13 +443,12 @@ "compact", "json" ], - "server-disabled": { + "server": { "command": [ "dotnet", "run", - "--no-build", - "--no-restore", - "--", + "--no-build", + "--project=Server/Server.csproj", "server" ] }, @@ -445,13 +457,12 @@ "command": [ "dotnet", "run", - "--no-build", - "--no-restore", - "--", + "--no-build", + "--project=Client/Client.csproj", "client" ] }, - "workdir": "netcore/ThriftTest" + "workdir": "netcore" }, { "name": "perl", diff --git a/vendor/git.apache.org/thrift.git/tutorial/Makefile.am b/vendor/git.apache.org/thrift.git/tutorial/Makefile.am index 
d8ad09c60..0499460aa 100755 --- a/vendor/git.apache.org/thrift.git/tutorial/Makefile.am +++ b/vendor/git.apache.org/thrift.git/tutorial/Makefile.am @@ -78,6 +78,10 @@ if WITH_RS SUBDIRS += rs endif +if WITH_CL +SUBDIRS += cl +endif + # # generate html for ThriftTest.thrift # diff --git a/vendor/git.apache.org/thrift.git/tutorial/cl/Makefile.am b/vendor/git.apache.org/thrift.git/tutorial/cl/Makefile.am new file mode 100755 index 000000000..fb6e83a42 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/cl/Makefile.am @@ -0,0 +1,47 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +gen-cl: $(top_srcdir)/tutorial/tutorial.thrift + $(THRIFT) --gen cl -r $< + +TutorialServer: make-tutorial-server.lisp + $(SBCL) --script make-tutorial-server.lisp + +TutorialClient: make-tutorial-client.lisp + $(SBCL) --script make-tutorial-client.lisp + +all-local: gen-cl TutorialClient TutorialServer + +tutorialserver: all + ./TutorialServer + +tutorialclient: all + ./TutorialClient + +clean-local: + $(RM) -r gen-* + $(RM) TutorialServer + $(RM) TutorialClient + +EXTRA_DIST = \ + tutorial-implementation.lisp \ + shared-implementation.lisp \ + thrift-tutorial.asd \ + make-tutorial-server.lisp \ + make-tutorial-client.lisp diff --git a/vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-client.lisp b/vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-client.lisp new file mode 100644 index 000000000..59450a2ea --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-client.lisp @@ -0,0 +1,51 @@ +(in-package #:cl-user) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. 
+ +(require "asdf") +(load (merge-pathnames "../../lib/cl/load-locally.lisp" *load-truename*)) +(asdf:load-system :net.didierverna.clon) +(asdf:load-asd (merge-pathnames "gen-cl/shared/thrift-gen-shared.asd" *load-truename*)) +(asdf:load-asd (merge-pathnames "gen-cl/tutorial/thrift-gen-tutorial.asd" *load-truename*)) +(asdf:load-asd (merge-pathnames "thrift-tutorial.asd" *load-truename*)) +(asdf:load-system :thrift-tutorial) + +(net.didierverna.clon:nickname-package) + +(defun main () + "Entry point for the binary." + (thrift:with-client (prot #u"thrift://127.0.0.1:9090") + (tutorial.calculator:ping prot) + (format t "ping()~%") + (format t "1 + 1 = ~a~%" (tutorial.calculator:add prot 1 1)) + (let ((work-instance (tutorial:make-work :num1 5 + :num2 0 + :op tutorial:operation.divide + :comment "Booya!"))) + (handler-case (format t + "5 / 0 = ~a - Oh, really? An exception should have been thrown here.~%" + (tutorial.calculator:calculate prot 1 work-instance)) + (tutorial:invalidoperation (e) + (format t "---~%(Expected) Invalid Operation caught: ~%~a~%---~%" e)))) + (let ((work-instance (tutorial:make-work :num1 15 + :num2 10 + :op tutorial:operation.subtract + :comment "Playing nice this time."))) + (handler-case (format t + "15 - 10 = ~a~%" + (tutorial.calculator:calculate prot 1 work-instance)) + (tutorial:invalidoperation (e) + (format t "---~%(Unexpected) Invalid Operation caught: ~%~a~%---~%" e)))) + (format t "Check log: ~a~%" (shared.shared-service:get-struct prot 1)))) + +(clon:dump "TutorialClient" main) diff --git a/vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-server.lisp b/vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-server.lisp new file mode 100644 index 000000000..5621ff366 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/cl/make-tutorial-server.lisp @@ -0,0 +1,29 @@ +(in-package #:cl-user) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance 
with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. + +(require "asdf") +(load (merge-pathnames "../../lib/cl/load-locally.lisp" *load-truename*)) +(asdf:load-system :net.didierverna.clon) +(asdf:load-asd (merge-pathnames "gen-cl/shared/thrift-gen-shared.asd" *load-truename*)) +(asdf:load-asd (merge-pathnames "gen-cl/tutorial/thrift-gen-tutorial.asd" *load-truename*)) +(asdf:load-asd (merge-pathnames "thrift-tutorial.asd" *load-truename*)) +(asdf:load-system :thrift-tutorial) + +(net.didierverna.clon:nickname-package) + +(defun main () + "Entry point for the binary." + (thrift:serve #u"thrift://127.0.0.1:9090" tutorial:calculator)) + +(clon:dump "TutorialServer" main) diff --git a/vendor/git.apache.org/thrift.git/tutorial/cl/shared-implementation.lisp b/vendor/git.apache.org/thrift.git/tutorial/cl/shared-implementation.lisp new file mode 100644 index 000000000..c197626a5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/cl/shared-implementation.lisp @@ -0,0 +1,25 @@ +(in-package #:shared-implementation) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. + +(defvar *structs* (make-hash-table)) + +(defun shared.shared-service-implementation:get-struct (key) + (format t "getStruct(~a)~%" key) + (gethash key *structs*)) + +(defun add-log (key value) + (setf (gethash key *structs*) + (make-instance 'shared:sharedstruct + :key key + :value (write-to-string value)))) diff --git a/vendor/git.apache.org/thrift.git/tutorial/cl/thrift-tutorial.asd b/vendor/git.apache.org/thrift.git/tutorial/cl/thrift-tutorial.asd new file mode 100644 index 000000000..8a0353763 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/cl/thrift-tutorial.asd @@ -0,0 +1,17 @@ +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. +;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. + +(asdf:defsystem #:thrift-tutorial + :depends-on (#:thrift-gen-tutorial) + :serial t + :components ((:file "shared-implementation") + (:file "tutorial-implementation"))) diff --git a/vendor/git.apache.org/thrift.git/tutorial/cl/tutorial-implementation.lisp b/vendor/git.apache.org/thrift.git/tutorial/cl/tutorial-implementation.lisp new file mode 100644 index 000000000..5c92fe405 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/cl/tutorial-implementation.lisp @@ -0,0 +1,41 @@ +(in-package #:tutorial-implementation) + +;;;; Licensed under the Apache License, Version 2.0 (the "License"); +;;;; you may not use this file except in compliance with the License. 
+;;;; You may obtain a copy of the License at +;;;; +;;;; http://www.apache.org/licenses/LICENSE-2.0 +;;;; +;;;; Unless required by applicable law or agreed to in writing, software +;;;; distributed under the License is distributed on an "AS IS" BASIS, +;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;;;; See the License for the specific language governing permissions and +;;;; limitations under the License. + +(defun tutorial.calculator-implementation:ping () + (format t "ping()~%")) + +(defun tutorial.calculator-implementation:add (num1 num2) + (format t "add(~a, ~a)~%" num1 num2) + (+ num1 num2)) + +(defun tutorial.calculator-implementation:calculate (logid work) + (format t "calculate(~a, ~a)~%" logid work) + (handler-case + (let* ((num1 (tutorial:work-num1 work)) + (num2 (tutorial:work-num2 work)) + (op (tutorial:work-op work)) + (result + (cond + ((= op tutorial:operation.add) (+ num1 num2)) + ((= op tutorial:operation.subtract) (- num1 num2)) + ((= op tutorial:operation.multiply) (* num1 num2)) + ((= op tutorial:operation.divide) (/ num1 num2))))) + (shared-implementation::add-log logid result) + result) + (division-by-zero () (error 'tutorial:invalidoperation + :why "Division by zero." 
+ :what-op (tutorial:work-op work))))) + +(defun tutorial.calculator-implementation:zip () + (format t "zip()~%")) diff --git a/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpClient/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpClient/Properties/AssemblyInfo.cs index 2b801c2ca..1ff658c39 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpClient/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpClient/Properties/AssemblyInfo.cs @@ -51,5 +51,5 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpServer/Properties/AssemblyInfo.cs b/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpServer/Properties/AssemblyInfo.cs index b11cbdf93..74fa476e7 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpServer/Properties/AssemblyInfo.cs +++ b/vendor/git.apache.org/thrift.git/tutorial/csharp/CsharpServer/Properties/AssemblyInfo.cs @@ -51,5 +51,5 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyVersion("0.11.0.0")] -[assembly: AssemblyFileVersion("0.11.0.0")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] diff --git a/vendor/git.apache.org/thrift.git/tutorial/dart/client/pubspec.yaml b/vendor/git.apache.org/thrift.git/tutorial/dart/client/pubspec.yaml index 51b7ad2cd..db64afcfc 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/dart/client/pubspec.yaml +++ 
b/vendor/git.apache.org/thrift.git/tutorial/dart/client/pubspec.yaml @@ -16,7 +16,7 @@ # under the License. name: tutorial_client -version: 0.11.0 +version: 1.0.0-dev description: A Dart client implementation of the Apache Thrift tutorial author: Apache Thrift Developers homepage: http://thrift.apache.org @@ -25,7 +25,7 @@ environment: sdk: ">=1.13.0 <2.0.0" dependencies: - browser: ^0.11.0 + browser: ^0.10.0 shared: path: ../gen-dart/shared thrift: diff --git a/vendor/git.apache.org/thrift.git/tutorial/dart/console_client/pubspec.yaml b/vendor/git.apache.org/thrift.git/tutorial/dart/console_client/pubspec.yaml index dea54494f..cea13acd5 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/dart/console_client/pubspec.yaml +++ b/vendor/git.apache.org/thrift.git/tutorial/dart/console_client/pubspec.yaml @@ -16,7 +16,7 @@ # under the License. name: tutorial_console_client -version: 0.11.0 +version: 1.0.0-dev description: > A Dart console client to implementation of the Apache Thrift tutorial author: Apache Thrift Developers diff --git a/vendor/git.apache.org/thrift.git/tutorial/dart/server/pubspec.yaml b/vendor/git.apache.org/thrift.git/tutorial/dart/server/pubspec.yaml index b925d2f7c..4833a4af8 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/dart/server/pubspec.yaml +++ b/vendor/git.apache.org/thrift.git/tutorial/dart/server/pubspec.yaml @@ -16,7 +16,7 @@ # under the License. 
name: tutorial_server -version: 0.11.0 +version: 1.0.0-dev description: A Dart server to support the Apache Thrift tutorial author: Apache Thrift Developers homepage: http://thrift.apache.org diff --git a/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiClient/DelphiClient.dproj b/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiClient/DelphiClient.dproj index 38226b369..34aa53388 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiClient/DelphiClient.dproj +++ b/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiClient/DelphiClient.dproj @@ -97,13 +97,13 @@ Thrift Tutorial - 0.11.0.0 + 1.0.0.0 DelphiClient Copyright © 2012 The Apache Software Foundation DelphiClient.exe Thrift - 0.11.0.0 + 1.0.0.0 diff --git a/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiServer/DelphiServer.dproj b/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiServer/DelphiServer.dproj index ad47a53a3..74811bc10 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiServer/DelphiServer.dproj +++ b/vendor/git.apache.org/thrift.git/tutorial/delphi/DelphiServer/DelphiServer.dproj @@ -96,13 +96,13 @@ Thrift Tutorial - 0.11.0.0 + 1.0.0.0 DelphiServer Copyright © 2012 The Apache Software Foundation DelphiServer.exe Thrift - 0.11.0.0 + 1.0.0.0 diff --git a/vendor/git.apache.org/thrift.git/tutorial/go/Makefile.am b/vendor/git.apache.org/thrift.git/tutorial/go/Makefile.am index a146d5c9f..87a8f5a71 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/go/Makefile.am +++ b/vendor/git.apache.org/thrift.git/tutorial/go/Makefile.am @@ -17,10 +17,6 @@ # under the License. 
# -if GOVERSION_LT_17 -COMPILER_EXTRAFLAG=":legacy_context" -endif - gen-go/tutorial/calculator.go gen-go/shared/shared_service.go: $(top_srcdir)/tutorial/tutorial.thrift $(THRIFT) --gen go$(COMPILER_EXTRAFLAG) -r $< @@ -37,9 +33,6 @@ src/git.apache.org/thrift.git/lib/go/thrift: ln -sf $(realpath $(top_srcdir)/lib/go/thrift) src/git.apache.org/thrift.git/lib/go/thrift thirdparty-dep: - mkdir -p src/golang.org/x/net - GOPATH=`pwd`/gopath $(GO) get golang.org/x/net/context - ln -sf `pwd`/gopath/src/golang.org/x/net/context src/golang.org/x/net/context tutorialserver: all GOPATH=`pwd` $(GO) run src/*.go -server=true @@ -61,9 +54,6 @@ EXTRA_DIST = \ src/handler.go \ src/server.go \ src/main.go \ - src/go17.go \ - src/handler_go17.go \ - src/pre_go17.go \ server.crt \ server.key diff --git a/vendor/git.apache.org/thrift.git/tutorial/go/src/client.go b/vendor/git.apache.org/thrift.git/tutorial/go/src/client.go index 25616bf4e..1d658b954 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/go/src/client.go +++ b/vendor/git.apache.org/thrift.git/tutorial/go/src/client.go @@ -20,6 +20,7 @@ package main */ import ( + "context" "crypto/tls" "fmt" "tutorial" @@ -27,6 +28,8 @@ import ( "git.apache.org/thrift.git/lib/go/thrift" ) +var defaultCtx = context.Background() + func handleClient(client *tutorial.CalculatorClient) (err error) { client.Ping(defaultCtx) fmt.Println("ping()") diff --git a/vendor/git.apache.org/thrift.git/tutorial/go/src/handler.go b/vendor/git.apache.org/thrift.git/tutorial/go/src/handler.go index 783b43267..5c0eed006 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/go/src/handler.go +++ b/vendor/git.apache.org/thrift.git/tutorial/go/src/handler.go @@ -1,5 +1,3 @@ -// +build !go1.7 - package main /* @@ -22,12 +20,11 @@ package main */ import ( + "context" "fmt" "shared" "strconv" "tutorial" - - "golang.org/x/net/context" ) type CalculatorHandler struct { diff --git a/vendor/git.apache.org/thrift.git/tutorial/go/src/handler_go17.go 
b/vendor/git.apache.org/thrift.git/tutorial/go/src/handler_go17.go deleted file mode 100644 index d6752cc78..000000000 --- a/vendor/git.apache.org/thrift.git/tutorial/go/src/handler_go17.go +++ /dev/null @@ -1,104 +0,0 @@ -// +build go1.7 - -package main - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import ( - "context" - "fmt" - "shared" - "strconv" - "tutorial" -) - -type CalculatorHandler struct { - log map[int]*shared.SharedStruct -} - -func NewCalculatorHandler() *CalculatorHandler { - return &CalculatorHandler{log: make(map[int]*shared.SharedStruct)} -} - -func (p *CalculatorHandler) Ping(ctx context.Context) (err error) { - fmt.Print("ping()\n") - return nil -} - -func (p *CalculatorHandler) Add(ctx context.Context, num1 int32, num2 int32) (retval17 int32, err error) { - fmt.Print("add(", num1, ",", num2, ")\n") - return num1 + num2, nil -} - -func (p *CalculatorHandler) Calculate(ctx context.Context, logid int32, w *tutorial.Work) (val int32, err error) { - fmt.Print("calculate(", logid, ", {", w.Op, ",", w.Num1, ",", w.Num2, "})\n") - switch w.Op { - case tutorial.Operation_ADD: - val = w.Num1 + w.Num2 - break - case tutorial.Operation_SUBTRACT: - val = w.Num1 - w.Num2 - break - case tutorial.Operation_MULTIPLY: - val = w.Num1 * w.Num2 - break - case tutorial.Operation_DIVIDE: - if w.Num2 == 0 { - ouch := tutorial.NewInvalidOperation() - ouch.WhatOp = int32(w.Op) - ouch.Why = "Cannot divide by 0" - err = ouch - return - } - val = w.Num1 / w.Num2 - break - default: - ouch := tutorial.NewInvalidOperation() - ouch.WhatOp = int32(w.Op) - ouch.Why = "Unknown operation" - err = ouch - return - } - entry := shared.NewSharedStruct() - entry.Key = logid - entry.Value = strconv.Itoa(int(val)) - k := int(logid) - /* - oldvalue, exists := p.log[k] - if exists { - fmt.Print("Replacing ", oldvalue, " with ", entry, " for key ", k, "\n") - } else { - fmt.Print("Adding ", entry, " for key ", k, "\n") - } - */ - p.log[k] = entry - return val, err -} - -func (p *CalculatorHandler) GetStruct(ctx context.Context, key int32) (*shared.SharedStruct, error) { - fmt.Print("getStruct(", key, ")\n") - v, _ := p.log[int(key)] - return v, nil -} - -func (p *CalculatorHandler) Zip(ctx context.Context) (err error) { - fmt.Print("zip()\n") - return nil -} diff --git 
a/vendor/git.apache.org/thrift.git/tutorial/go/src/pre_go17.go b/vendor/git.apache.org/thrift.git/tutorial/go/src/pre_go17.go deleted file mode 100644 index 10a6fb8d9..000000000 --- a/vendor/git.apache.org/thrift.git/tutorial/go/src/pre_go17.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.7 - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package main - -import "golang.org/x/net/context" - -var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/tutorial/hs/ThriftTutorial.cabal b/vendor/git.apache.org/thrift.git/tutorial/hs/ThriftTutorial.cabal index 98fbd7ba5..88f137940 100755 --- a/vendor/git.apache.org/thrift.git/tutorial/hs/ThriftTutorial.cabal +++ b/vendor/git.apache.org/thrift.git/tutorial/hs/ThriftTutorial.cabal @@ -18,7 +18,7 @@ -- Name: ThriftTutorial -Version: 0.11.0 +Version: 0.1.0 Cabal-Version: >= 1.4 License: OtherLicense Category: Foreign diff --git a/vendor/git.apache.org/thrift.git/tutorial/java/build.xml b/vendor/git.apache.org/thrift.git/tutorial/java/build.xml index c895ea908..55cdb8fab 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/java/build.xml +++ b/vendor/git.apache.org/thrift.git/tutorial/java/build.xml @@ -25,11 +25,13 @@ - - - + + + + + - + @@ -42,7 +44,7 @@ - + diff --git a/vendor/git.apache.org/thrift.git/tutorial/js/build.xml b/vendor/git.apache.org/thrift.git/tutorial/js/build.xml index 2df2e7125..03a6e7c64 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/js/build.xml +++ b/vendor/git.apache.org/thrift.git/tutorial/js/build.xml @@ -31,11 +31,13 @@ - - - + + + + + - + @@ -80,7 +82,7 @@ - + diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/Client/Program.cs b/vendor/git.apache.org/thrift.git/tutorial/netcore/Client/Program.cs index ae1837b98..ce5d8c7e4 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/Client/Program.cs +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/Client/Program.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -42,14 +42,14 @@ namespace Client { Logger.LogInformation(@" Usage: - Client.exe -h + 
Client.exe -help will diplay help information - Client.exe -t: -p: -mc: + Client.exe -tr: -pr: -mc: will run client with specified arguments (tcp transport and binary protocol by default) and with 1 client Options: - -t (transport): + -tr (transport): tcp - (default) tcp transport will be used (host - ""localhost"", port - 9090) tcpbuffered - buffered transport over tcp will be used (host - ""localhost"", port - 9090) namedpipe - namedpipe transport will be used (pipe address - "".test"") @@ -57,7 +57,7 @@ Options: tcptls - tcp tls transport will be used (host - ""localhost"", port - 9090) framed - tcp framed transport will be used (host - ""localhost"", port - 9090) - -p (protocol): + -pr (protocol): binary - (default) binary protocol will be used compact - compact protocol will be used json - json protocol will be used @@ -67,7 +67,7 @@ Options: - number of multiple clients to connect to server (max 100, default 1) Sample: - Client.exe -t:tcp -p:binary + Client.exe -tr:tcp -p:binary "); } @@ -75,7 +75,7 @@ Sample: { args = args ?? 
new string[0]; - if (args.Any(x => x.StartsWith("-h", StringComparison.OrdinalIgnoreCase))) + if (args.Any(x => x.StartsWith("-help", StringComparison.OrdinalIgnoreCase))) { DisplayHelp(); return; @@ -127,7 +127,7 @@ Sample: private static TClientTransport GetTransport(string[] args) { - var transport = args.FirstOrDefault(x => x.StartsWith("-t"))?.Split(':')?[1]; + var transport = args.FirstOrDefault(x => x.StartsWith("-tr"))?.Split(':')?[1]; Transport selectedTransport; if (Enum.TryParse(transport, true, out selectedTransport)) @@ -203,7 +203,7 @@ Sample: private static Tuple GetProtocol(string[] args, TClientTransport transport) { - var protocol = args.FirstOrDefault(x => x.StartsWith("-p"))?.Split(':')?[1]; + var protocol = args.FirstOrDefault(x => x.StartsWith("-pr"))?.Split(':')?[1]; Protocol selectedProtocol; if (Enum.TryParse(protocol, true, out selectedProtocol)) @@ -352,4 +352,4 @@ Sample: Multiplexed } } -} \ No newline at end of file +} diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/.gitignore b/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/.gitignore new file mode 100644 index 000000000..2e7446e33 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/.gitignore @@ -0,0 +1,3 @@ +# ignore for autogenerated files +/shared +/tutorial diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/Interfaces.csproj b/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/Interfaces.csproj index 14fad79f9..4297a0654 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/Interfaces.csproj +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/Interfaces/Interfaces.csproj @@ -17,4 +17,14 @@ + + + + + + + + + + diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/Makefile.am b/vendor/git.apache.org/thrift.git/tutorial/netcore/Makefile.am index ef3d618fe..e30555655 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/Makefile.am +++ 
b/vendor/git.apache.org/thrift.git/tutorial/netcore/Makefile.am @@ -19,39 +19,11 @@ SUBDIRS = . -THRIFT = $(top_builddir)/compiler/cpp/thrift - -GENDIR = Interfaces/gen-netcore - -# Due to a known issue with "dotnet restore" the Thrift.dll dependency cannot be resolved from cmdline. -# The problem does NOT affect Visual Studio builds, only cmdline. -# - For details see https://github.com/dotnet/cli/issues/3199 and related tickets. -# - Workaround is to temporarily copy the Thrift project into the solution -COPYCMD = cp -u -p -r - - -THRIFTCODE = \ - Interfaces/Properties/AssemblyInfo.cs \ - Client/Properties/AssemblyInfo.cs \ - Client/Program.cs \ - Server/Properties/AssemblyInfo.cs \ - Server/Program.cs - -all-local: \ - Client.exe - -Client.exe: $(THRIFTCODE) - $(MKDIR_P) $(GENDIR) - $(THRIFT) -gen netcore:wcf -r -out $(GENDIR) $(top_srcdir)/tutorial/tutorial.thrift - $(DOTNETCORE) --info - $(DOTNETCORE) restore +all-local: $(DOTNETCORE) build clean-local: - $(RM) Client.exe - $(RM) Server.exe $(RM) Interfaces.dll - $(RM) -r $(GENDIR) $(RM) -r Client/bin $(RM) -r Client/obj $(RM) -r Server/bin @@ -60,16 +32,11 @@ clean-local: $(RM) -r Interfaces/obj EXTRA_DIST = \ - $(THRIFTCODE) \ - Tutorial.sln \ - Interfaces/Interfaces.csproj \ - Client/Client.csproj \ - Client/ThriftTest.pfx \ - Client/Properties/launchSettings.json \ - Server/Server.csproj \ - Server/ThriftTest.pfx \ - Server/Properties/launchSettings.json \ - build.cmd \ - build.sh \ - README.md + Client \ + Interfaces \ + README.md \ + Server \ + Tutorial.sln \ + build.cmd \ + build.sh diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/README.md b/vendor/git.apache.org/thrift.git/tutorial/netcore/README.md index 6b2f6606e..626ef9212 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/README.md +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/README.md @@ -1,11 +1,11 @@ # Building of samples for different platforms # Reused components -- NET Core Standard 1.6 (SDK 
1.0.0-preview2-003121) -- NET Core App 1.1 +- NET Core Standard 2.0 +- NET Core App 2.0 # How to build -- Download and install .NET Core SDK for your platform https://www.microsoft.com/net/core#windowsvs2015 (archive for SDK 1.0.0-preview2-003121 located by: https://github.com/dotnet/core/blob/master/release-notes/download-archive.md) +- Download and install the latest .NET Core SDK for your platform https://www.microsoft.com/net/core#windowsvs2015 (archive for SDK 1.0.0-preview2-003121 located by: https://github.com/dotnet/core/blob/master/release-notes/download-archive.md) - Ensure that you have thrift.exe which supports netcore lib and it added to PATH - Go to current folder - Run **build.sh** or **build.cmd** from the root of cloned repository @@ -18,16 +18,13 @@ Notes: dotnet run supports passing arguments to app after -- symbols (https://do - build - go to folder (Client/Server) -- run with specifying of correct parameters **dotnet run -t:tcp -p:multiplexed**, **dotnet run -help** (later, after migration to csproj and latest SDK will be possibility to use more usable form **dotnet run -- arguments**) +- run with specifying of correct parameters **dotnet run -tr:tcp -pr:multiplexed**, **dotnet run -help** (later, after migration to csproj and latest SDK will be possibility to use more usable form **dotnet run -- arguments**) #Notes -- Migration to .NET Standard 2.0 planned for later (Q1 2017) according to https://blogs.msdn.microsoft.com/dotnet/2016/09/26/introducing-net-standard/ - Possible adding additional platforms after stabilization of .NET Core (runtimes, platforms (Red Hat Linux, OpenSuse, etc.) 
#Known issues - In trace logging mode you can see some not important internal exceptions -- Ubuntu 16.10 still not supported fully -- There is some problems with .NET Core CLI and usage specific -r|--runtime for building and publishing projects with different target frameworks (netstandard1.6 and netcoreapp1.1) # Running of samples Please install Thrift C# .NET Core library or copy sources and build them to correcly build and run samples @@ -39,12 +36,12 @@ Usage: Server.exe -h will diplay help information - Server.exe -t: -p: + Server.exe -tr: -pr: will run server with specified arguments (tcp transport and binary protocol by default) Options: - -t (transport): + -tr (transport): tcp - (default) tcp transport will be used (host - ""localhost"", port - 9090) tcpbuffered - tcp buffered transport will be used (host - ""localhost"", port - 9090) namedpipe - namedpipe transport will be used (pipe address - "".test"") @@ -52,14 +49,14 @@ Options: tcptls - tcp transport with tls will be used (host - ""localhost"", port - 9090) framed - tcp framed transport will be used (host - ""localhost"", port - 9090) - -p (protocol): + -pr (protocol): binary - (default) binary protocol will be used compact - compact protocol will be used json - json protocol will be used Sample: - Server.exe -t:tcp + Server.exe -tr:tcp **Remarks**: @@ -75,12 +72,12 @@ Usage: Client.exe -h will diplay help information - Client.exe -t: -p: -mc: + Client.exe -tr: -pr: -mc: will run client with specified arguments (tcp transport and binary protocol by default) Options: - -t (transport): + -tr (transport): tcp - (default) tcp transport will be used (host - ""localhost"", port - 9090) tcpbuffered - buffered transport over tcp will be used (host - ""localhost"", port - 9090) namedpipe - namedpipe transport will be used (pipe address - "".test"") @@ -88,7 +85,7 @@ Options: tcptls - tcp tls transport will be used (host - ""localhost"", port - 9090) framed - tcp framed transport will be used (host - 
""localhost"", port - 9090) - -p (protocol): + -pr (protocol): binary - (default) binary protocol will be used compact - compact protocol will be used json - json protocol will be used @@ -98,7 +95,7 @@ Options: Sample: - Client.exe -t:tcp -p:binary -mc:10 + Client.exe -tr:tcp -pr:binary -mc:10 Remarks: diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/Server/Program.cs b/vendor/git.apache.org/thrift.git/tutorial/netcore/Server/Program.cs index b8cc02eb6..6a181bab7 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/Server/Program.cs +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/Server/Program.cs @@ -1,4 +1,4 @@ -// Licensed to the Apache Software Foundation(ASF) under one +// Licensed to the Apache Software Foundation(ASF) under one // or more contributor license agreements.See the NOTICE file // distributed with this work for additional information // regarding copyright ownership.The ASF licenses this file @@ -46,7 +46,7 @@ namespace Server { args = args ?? 
new string[0]; - if (args.Any(x => x.StartsWith("-h", StringComparison.OrdinalIgnoreCase))) + if (args.Any(x => x.StartsWith("-help", StringComparison.OrdinalIgnoreCase))) { DisplayHelp(); return; @@ -69,14 +69,14 @@ namespace Server { Logger.LogInformation(@" Usage: - Server.exe -h + Server.exe -help will diplay help information - Server.exe -t: -p: + Server.exe -tr: -pr: will run server with specified arguments (tcp transport and binary protocol by default) Options: - -t (transport): + -tr (transport): tcp - (default) tcp transport will be used (host - ""localhost"", port - 9090) tcpbuffered - tcp buffered transport will be used (host - ""localhost"", port - 9090) namedpipe - namedpipe transport will be used (pipe address - "".test"") @@ -84,14 +84,14 @@ Options: tcptls - tcp transport with tls will be used (host - ""localhost"", port - 9090) framed - tcp framed transport will be used (host - ""localhost"", port - 9090) - -p (protocol): + -pr (protocol): binary - (default) binary protocol will be used compact - compact protocol will be used json - json protocol will be used multiplexed - multiplexed protocol will be used Sample: - Server.exe -t:tcp + Server.exe -tr:tcp "); } @@ -112,20 +112,18 @@ Sample: private static Protocol GetProtocol(string[] args) { - var transport = args.FirstOrDefault(x => x.StartsWith("-p"))?.Split(':')?[1]; - Protocol selectedProtocol; + var transport = args.FirstOrDefault(x => x.StartsWith("-pr"))?.Split(':')?[1]; - Enum.TryParse(transport, true, out selectedProtocol); + Enum.TryParse(transport, true, out Protocol selectedProtocol); return selectedProtocol; } private static Transport GetTransport(string[] args) { - var transport = args.FirstOrDefault(x => x.StartsWith("-t"))?.Split(':')?[1]; - Transport selectedTransport; + var transport = args.FirstOrDefault(x => x.StartsWith("-tr"))?.Split(':')?[1]; - Enum.TryParse(transport, true, out selectedTransport); + Enum.TryParse(transport, true, out Transport selectedTransport); return 
selectedTransport; } @@ -288,11 +286,10 @@ Sample: .UseKestrel() .UseUrls("http://localhost:9090") .UseContentRoot(Directory.GetCurrentDirectory()) - .UseIISIntegration() .UseStartup() .Build(); - host.StartAsync(cancellationToken); // was Run() in earlier .NET Core SDKs? + host.RunAsync(cancellationToken).GetAwaiter().GetResult(); } public class Startup diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/Tutorial.sln b/vendor/git.apache.org/thrift.git/tutorial/netcore/Tutorial.sln index bff110c79..2ddcd4617 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/Tutorial.sln +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/Tutorial.sln @@ -1,68 +1,78 @@ - Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 VisualStudioVersion = 15.0.26114.2 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Thrift", "..\..\lib\netcore\Thrift\Thrift.csproj", "{C20EA2A9-7660-47DE-9A49-D1EF12FB2895}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thrift", "..\..\lib\netcore\Thrift\Thrift.csproj", "{C20EA2A9-7660-47DE-9A49-D1EF12FB2895}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Interfaces", "Interfaces\Interfaces.csproj", "{B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Interfaces", "Interfaces\Interfaces.csproj", "{B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Client", "Client\Client.csproj", "{E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Client", "Client\Client.csproj", "{E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Server", "Server\Server.csproj", "{E08F5B84-2B4A-4E09-82D1-E0715775CE5E}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Server", "Server\Server.csproj", "{E08F5B84-2B4A-4E09-82D1-E0715775CE5E}" EndProject Global 
GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU Debug|x64 = Debug|x64 Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU Release|x64 = Release|x64 Release|x86 = Release|x86 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {6850CF46-5467-4C65-BD78-871581C539FC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {6850CF46-5467-4C65-BD78-871581C539FC}.Debug|Any CPU.Build.0 = Debug|Any CPU - {6850CF46-5467-4C65-BD78-871581C539FC}.Release|Any CPU.ActiveCfg = Release|Any CPU - {6850CF46-5467-4C65-BD78-871581C539FC}.Release|Any CPU.Build.0 = Release|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|x64.ActiveCfg = Debug|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|x64.Build.0 = Debug|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|x86.ActiveCfg = Debug|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Debug|x86.Build.0 = Debug|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|Any CPU.Build.0 = Release|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x64.ActiveCfg = Release|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x64.Build.0 = Release|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x86.ActiveCfg = Release|Any CPU + {C20EA2A9-7660-47DE-9A49-D1EF12FB2895}.Release|x86.Build.0 = Release|Any CPU {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|Any CPU.Build.0 = Debug|Any CPU - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x64.ActiveCfg = Debug|x64 - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x64.Build.0 = Debug|x64 - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x86.ActiveCfg = Debug|x86 - 
{B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x86.Build.0 = Debug|x86 + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x64.ActiveCfg = Debug|Any CPU + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x64.Build.0 = Debug|Any CPU + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x86.ActiveCfg = Debug|Any CPU + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Debug|x86.Build.0 = Debug|Any CPU {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|Any CPU.ActiveCfg = Release|Any CPU {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|Any CPU.Build.0 = Release|Any CPU - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x64.ActiveCfg = Release|x64 - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x64.Build.0 = Release|x64 - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x86.ActiveCfg = Release|x86 - {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x86.Build.0 = Release|x86 + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x64.ActiveCfg = Release|Any CPU + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x64.Build.0 = Release|Any CPU + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x86.ActiveCfg = Release|Any CPU + {B9E24D84-2712-4158-8F1A-DDE44CD1BB0A}.Release|x86.Build.0 = Release|Any CPU {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x64.ActiveCfg = Debug|x64 - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x64.Build.0 = Debug|x64 - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x86.ActiveCfg = Debug|x86 - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x86.Build.0 = Debug|x86 + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x64.ActiveCfg = Debug|Any CPU + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x64.Build.0 = Debug|Any CPU + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x86.ActiveCfg = Debug|Any CPU + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Debug|x86.Build.0 = Debug|Any CPU {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|Any CPU.ActiveCfg = Release|Any CPU 
{E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|Any CPU.Build.0 = Release|Any CPU - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x64.ActiveCfg = Release|x64 - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x64.Build.0 = Release|x64 - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x86.ActiveCfg = Release|x86 - {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x86.Build.0 = Release|x86 + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x64.ActiveCfg = Release|Any CPU + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x64.Build.0 = Release|Any CPU + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x86.ActiveCfg = Release|Any CPU + {E4CA1EF0-B181-4A5D-A02C-DB0750A59CDF}.Release|x86.Build.0 = Release|Any CPU {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x64.ActiveCfg = Debug|x64 - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x64.Build.0 = Debug|x64 - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x86.ActiveCfg = Debug|x86 - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x86.Build.0 = Debug|x86 + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x64.ActiveCfg = Debug|Any CPU + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x64.Build.0 = Debug|Any CPU + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x86.ActiveCfg = Debug|Any CPU + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Debug|x86.Build.0 = Debug|Any CPU {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|Any CPU.ActiveCfg = Release|Any CPU {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|Any CPU.Build.0 = Release|Any CPU - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x64.ActiveCfg = Release|x64 - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x64.Build.0 = Release|x64 - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x86.ActiveCfg = Release|x86 - {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x86.Build.0 = Release|x86 + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x64.ActiveCfg = Release|Any 
CPU + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x64.Build.0 = Release|Any CPU + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x86.ActiveCfg = Release|Any CPU + {E08F5B84-2B4A-4E09-82D1-E0715775CE5E}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {070A5D1D-B29D-4603-999D-693DB444AD0D} + EndGlobalSection EndGlobal diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/build.cmd b/vendor/git.apache.org/thrift.git/tutorial/netcore/build.cmd index e971799d8..9b84ef276 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/build.cmd +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/build.cmd @@ -19,12 +19,7 @@ rem * under the License. rem */ setlocal -cd Interfaces -thrift -gen netcore:wcf -r ..\..\tutorial.thrift -cd .. - dotnet --info -dotnet restore dotnet build :eof diff --git a/vendor/git.apache.org/thrift.git/tutorial/netcore/build.sh b/vendor/git.apache.org/thrift.git/tutorial/netcore/build.sh index d2cb46511..c97e310f0 100755 --- a/vendor/git.apache.org/thrift.git/tutorial/netcore/build.sh +++ b/vendor/git.apache.org/thrift.git/tutorial/netcore/build.sh @@ -22,10 +22,5 @@ #exit if any command fails set -e -cd Interfaces -../../../compiler/cpp/thrift -gen netcore:wcf -r ../../tutorial.thrift -cd .. 
- dotnet --info -dotnet restore dotnet build diff --git a/vendor/git.apache.org/thrift.git/tutorial/ocaml/_oasis b/vendor/git.apache.org/thrift.git/tutorial/ocaml/_oasis index 898261d81..4cab08063 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/ocaml/_oasis +++ b/vendor/git.apache.org/thrift.git/tutorial/ocaml/_oasis @@ -1,5 +1,5 @@ Name: tutorial -Version: 0.11.0 +Version: 1.0 OASISFormat: 0.3 Synopsis: OCaml Tutorial example Authors: Apache Thrift Developers diff --git a/vendor/git.apache.org/thrift.git/tutorial/php/PhpClient.php b/vendor/git.apache.org/thrift.git/tutorial/php/PhpClient.php index d262b8fe9..92dc3cbac 100755 --- a/vendor/git.apache.org/thrift.git/tutorial/php/PhpClient.php +++ b/vendor/git.apache.org/thrift.git/tutorial/php/PhpClient.php @@ -5,7 +5,7 @@ namespace tutorial\php; error_reporting(E_ALL); -require_once __DIR__.'/../../lib/php/lib/Thrift/ClassLoader/ThriftClassLoader.php'; +require_once __DIR__.'/../../lib/php/lib/ClassLoader/ThriftClassLoader.php'; use Thrift\ClassLoader\ThriftClassLoader; diff --git a/vendor/git.apache.org/thrift.git/tutorial/php/PhpServer.php b/vendor/git.apache.org/thrift.git/tutorial/php/PhpServer.php index 22ae43eb8..5a9b49bde 100755 --- a/vendor/git.apache.org/thrift.git/tutorial/php/PhpServer.php +++ b/vendor/git.apache.org/thrift.git/tutorial/php/PhpServer.php @@ -5,7 +5,7 @@ namespace tutorial\php; error_reporting(E_ALL); -require_once __DIR__.'/../../lib/php/lib/Thrift/ClassLoader/ThriftClassLoader.php'; +require_once __DIR__.'/../../lib/php/lib/ClassLoader/ThriftClassLoader.php'; use Thrift\ClassLoader\ThriftClassLoader; diff --git a/vendor/git.apache.org/thrift.git/tutorial/shared.thrift b/vendor/git.apache.org/thrift.git/tutorial/shared.thrift index 3cc1bb34e..f1685bd16 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/shared.thrift +++ b/vendor/git.apache.org/thrift.git/tutorial/shared.thrift @@ -22,6 +22,7 @@ * these definitions. 
*/ +namespace cl shared namespace cpp shared namespace d share // "shared" would collide with the eponymous D keyword. namespace dart shared diff --git a/vendor/git.apache.org/thrift.git/tutorial/tutorial.thrift b/vendor/git.apache.org/thrift.git/tutorial/tutorial.thrift index f8c5320d9..e02754644 100644 --- a/vendor/git.apache.org/thrift.git/tutorial/tutorial.thrift +++ b/vendor/git.apache.org/thrift.git/tutorial/tutorial.thrift @@ -62,6 +62,8 @@ include "shared.thrift" * Thrift files can namespace, package, or prefix their output in various * target languages. */ + +namespace cl tutorial namespace cpp tutorial namespace d tutorial namespace dart tutorial diff --git a/vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md b/vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..12acb3e29 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,2 @@ + \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md index 57e723846..f0f7e3a8a 100644 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md @@ -23,4 +23,41 @@ If you don't know what to do, there are some features and functions that need to Feel free to create what you want, but keep in mind when you implement new features: - Code must be clear and readable, names of variables/constants clearly describes what they are doing - Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements \ No newline at end of file +- There are must be unit-tests for any new functions and improvements + +## Financial contributions + +We also welcome financial contributions in full transparency on our [open 
collective](https://opencollective.com/govalidator). +Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. + + +## Credits + + +### Contributors + +Thank you to all the people who have already contributed to govalidator! + + + +### Backers + +Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md index 223880940..efd8e64aa 100644 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ b/vendor/github.com/asaskevich/govalidator/README.md @@ -1,7 +1,7 @@ govalidator =========== [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) 
[![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). @@ -406,6 +406,15 @@ govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator })) ``` +###### Custom error messages +Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: +```go +type Ticket struct { + Id int64 `json:"id"` + FirstName string `json:"firstname" valid:"required~First name is blank"` +} +``` + #### Notes Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). @@ -437,6 +446,11 @@ Feel free to create what you want, but keep in mind when you implement new featu - Public functions must be documented and described in source file and added to README.md to the list of available functions - There are must be unit-tests for any new functions and improvements +## Credits +### Contributors + +This project exists thanks to all the people who contribute. 
[[Contribute](CONTRIBUTING.md)]. + #### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors) * [Daniel Lohse](https://github.com/annismckenzie) * [Attila Oláh](https://github.com/attilaolah) @@ -447,3 +461,30 @@ Feel free to create what you want, but keep in mind when you implement new featu * [Nathan Davies](https://github.com/nathj07) * [Matt Sanford](https://github.com/mzsanford) * [Simon ccl1115](https://github.com/ccl1115) + + + + +### Backers + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)] + + + + + + + + + + + + + diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go index d69114c4b..cf1e5d569 100644 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ b/vendor/github.com/asaskevich/govalidator/converter.go @@ -3,6 +3,7 @@ package govalidator import ( "encoding/json" "fmt" + "reflect" "strconv" ) @@ -30,13 +31,31 @@ func ToFloat(str string) (float64, error) { return res, err } -// ToInt convert the input string to an integer, or 0 if the input is not an integer. -func ToInt(str string) (int64, error) { - res, err := strconv.ParseInt(str, 0, 64) - if err != nil { +// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. 
+func ToInt(value interface{}) (res int64, err error) { + val := reflect.ValueOf(value) + + switch value.(type) { + case int, int8, int16, int32, int64: + res = val.Int() + case uint, uint8, uint16, uint32, uint64: + res = int64(val.Uint()) + case string: + if IsInt(val.String()) { + res, err = strconv.ParseInt(val.String(), 0, 64) + if err != nil { + res = 0 + } + } else { + err = fmt.Errorf("math: square root of negative number %g", value) + res = 0 + } + default: + err = fmt.Errorf("math: square root of negative number %g", value) res = 0 } - return res, err + + return } // ToBoolean convert the input string to a boolean. diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go index d0140d421..7e6c652e1 100644 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ b/vendor/github.com/asaskevich/govalidator/numerics.go @@ -42,11 +42,14 @@ func IsNonPositive(value float64) bool { } // InRange returns true if value lies between left and right border -func InRangeInt(value, left, right int) bool { - if left > right { - left, right = right, left +func InRangeInt(value, left, right interface{}) bool { + value64, _ := ToInt(value) + left64, _ := ToInt(left) + right64, _ := ToInt(right) + if left64 > right64 { + left64, right64 = right64, left64 } - return value >= left && value <= right + return value64 >= left64 && value64 <= right64 } // InRange returns true if value lies between left and right border diff --git a/vendor/github.com/asaskevich/govalidator/numerics_test.go b/vendor/github.com/asaskevich/govalidator/numerics_test.go index ca743dfed..8a28415de 100644 --- a/vendor/github.com/asaskevich/govalidator/numerics_test.go +++ b/vendor/github.com/asaskevich/govalidator/numerics_test.go @@ -181,7 +181,7 @@ func TestIsNatural(t *testing.T) { func TestInRangeInt(t *testing.T) { t.Parallel() - var tests = []struct { + var testAsInts = []struct { param int left int right int @@ -196,10 +196,210 @@ 
func TestInRangeInt(t *testing.T) { {0, 0, -1, true}, {0, 10, 5, false}, } - for _, test := range tests { + for _, test := range testAsInts { actual := InRangeInt(test.param, test.left, test.right) if actual != test.expected { - t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v", test.param, test.left, test.right, test.expected, actual) + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsInt8s = []struct { + param int8 + left int8 + right int8 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInt8s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int8", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsInt16s = []struct { + param int16 + left int16 + right int16 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInt16s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int16", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsInt32s = []struct { + param int32 + left int32 + right int32 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInt32s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v 
using type int32", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsInt64s = []struct { + param int64 + left int64 + right int64 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {-1, 0, 0, false}, + {0, -1, 1, true}, + {0, 0, 1, true}, + {0, -1, 0, true}, + {0, 0, -1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsInt64s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type int64", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsUInts = []struct { + param uint + left uint + right uint + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {0, 0, 1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsUInts { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsUInt8s = []struct { + param uint8 + left uint8 + right uint8 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {0, 0, 1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsUInt8s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsUInt16s = []struct { + param uint16 + left uint16 + right uint16 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {0, 0, 1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsUInt16s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsUInt32s = 
[]struct { + param uint32 + left uint32 + right uint32 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {0, 0, 1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsUInt32s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsUInt64s = []struct { + param uint64 + left uint64 + right uint64 + expected bool + }{ + {0, 0, 0, true}, + {1, 0, 0, false}, + {0, 0, 1, true}, + {0, 10, 5, false}, + } + for _, test := range testAsUInt64s { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type uint", test.param, test.left, test.right, test.expected, actual) + } + } + + var testAsStrings = []struct { + param string + left string + right string + expected bool + }{ + {"0", "0", "0", true}, + {"1", "0", "0", false}, + {"-1", "0", "0", false}, + {"0", "-1", "1", true}, + {"0", "0", "1", true}, + {"0", "-1", "0", true}, + {"0", "0", "-1", true}, + {"0", "10", "5", false}, + } + for _, test := range testAsStrings { + actual := InRangeInt(test.param, test.left, test.right) + if actual != test.expected { + t.Errorf("Expected InRangeInt(%v, %v, %v) to be %v, got %v using type string", test.param, test.left, test.right, test.expected, actual) } } } diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go index 4a34e2240..8609cd22f 100644 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ b/vendor/github.com/asaskevich/govalidator/patterns.go @@ -4,7 +4,7 @@ import "regexp" // Basic regular expressions for validating strings const ( - Email string = 
"^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + //Email string = 
"^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" ISBN13 string = "^(?:[0-9]{13})$" @@ -43,6 +43,8 @@ const ( UnixPath string = `^(/[^/\x00]*)+/?$` Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" ) // Used by IsFilePath func @@ -56,7 +58,10 @@ const ( ) var ( - rxEmail = regexp.MustCompile(Email) + userRegexp = 
regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + //rxEmail = regexp.MustCompile(Email) rxCreditCard = regexp.MustCompile(CreditCard) rxISBN10 = regexp.MustCompile(ISBN10) rxISBN13 = regexp.MustCompile(ISBN13) @@ -87,4 +92,6 @@ var ( rxWinPath = regexp.MustCompile(WinPath) rxUnixPath = regexp.MustCompile(UnixPath) rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) ) diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go index 78ed3fbab..6a8871c1c 100644 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ b/vendor/github.com/asaskevich/govalidator/utils.go @@ -108,7 +108,9 @@ func CamelCaseToUnderscore(str string) string { var output []rune var segment []rune for _, r := range str { - if !unicode.IsLower(r) && string(r) != "_" { + + // not treat number as separate segment + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { output = addSegment(output, segment) segment = nil } diff --git a/vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go b/vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go new file mode 100644 index 000000000..2ac4b6102 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/utils_benchmark_test.go @@ -0,0 +1,17 @@ +package govalidator + +import "testing" + +func BenchmarkContains(b *testing.B) { + b.ResetTimer() + for n := 0; n < b.N; n++ { + Contains("a0b01c012deffghijklmnopqrstu0123456vwxyz", "0123456789") + } +} + +func BenchmarkMatches(b *testing.B) { + b.ResetTimer() + for n := 0; n < b.N; n++ { + Matches("alfkjl12309fdjldfsa209jlksdfjLAKJjs9uJH234", "[\\w\\d]+") + } +} diff --git a/vendor/github.com/asaskevich/govalidator/utils_test.go b/vendor/github.com/asaskevich/govalidator/utils_test.go index 
5ad8faeb4..97c97a534 100644 --- a/vendor/github.com/asaskevich/govalidator/utils_test.go +++ b/vendor/github.com/asaskevich/govalidator/utils_test.go @@ -270,6 +270,7 @@ func TestCamelCaseToUnderscore(t *testing.T) { {"ABC", "a_b_c"}, {"1B", "1_b"}, {"foo_bar", "foo_bar"}, + {"FooV2Bar", "foo_v2_bar"}, } for _, test := range tests { actual := CamelCaseToUnderscore(test.param) @@ -395,7 +396,7 @@ func TestNormalizeEmail(t *testing.T) { {`some.name.midd.lena.me.+extension@gmail.com`, `somenamemiddlename@gmail.com`}, {`some.name.midd.lena.me.+extension@googlemail.com`, `somenamemiddlename@gmail.com`}, {`some.name+extension@unknown.com`, `some.name+extension@unknown.com`}, - {`hans@m端ller.com`, `hans@m端ller.com`}, + // TODO: {`hans@m端ller.com`, `hans@m端ller.com`}, {`hans`, ``}, } for _, test := range tests { diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index 7c158c5f8..071f43c09 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -52,9 +52,33 @@ func SetFieldsRequiredByDefault(value bool) { } // IsEmail check if the string is an email. -func IsEmail(str string) bool { - // TODO uppercase letters are not supported - return rxEmail.MatchString(str) +func IsEmail(email string) bool { + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true } // IsURL check if the string is an URL. 
@@ -231,6 +255,22 @@ func IsUpperCase(str string) bool { return str == strings.ToUpper(str) } +// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase check if the string contains at least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + // IsInt check if the string is an integer. Empty string is valid. func IsInt(str string) bool { if IsNull(str) { @@ -523,7 +563,7 @@ func IsHash(str string, algorithm string) bool { return false } - return Matches(str, "^[a-f0-9]{" + len + "}$") + return Matches(str, "^[a-f0-9]{"+len+"}$") } // IsDialString validates the given string for usage with the various Dial() functions @@ -678,7 +718,9 @@ func ValidateStruct(s interface{}) (bool, error) { continue // Private field } structResult := true - if valueField.Kind() == reflect.Struct && typeField.Tag.Get(tagName) != "-" { + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { var err error structResult, err = ValidateStruct(valueField.Interface()) if err != nil { @@ -890,7 +932,7 @@ func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap } return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required"} } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required"} + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required"} } // not required and empty is valid return true, nil @@ -994,7 +1036,11 @@ func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, 
options delete(options, validatorSpec) switch v.Kind() { - case reflect.String: + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { if customMsgExists { diff --git a/vendor/github.com/asaskevich/govalidator/validator_test.go b/vendor/github.com/asaskevich/govalidator/validator_test.go index cf56f7a5a..226fc80df 100644 --- a/vendor/github.com/asaskevich/govalidator/validator_test.go +++ b/vendor/github.com/asaskevich/govalidator/validator_test.go @@ -499,6 +499,74 @@ func TestIsUpperCase(t *testing.T) { } } +func TestHasLowerCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc123", true}, + {"abc", true}, + {"a b c", true}, + {"abcß", true}, + {"abcẞ", true}, + {"ABCẞ", false}, + {"tr竪s 端ber", true}, + {"fooBar", true}, + {"123ABC", false}, + {"ABC123", false}, + {"ABC", false}, + {"S T R", false}, + {"fooBar", true}, + {"abacaba123", true}, + {"FÒÔBÀŘ", false}, + {"fòôbàř", true}, + {"fÒÔBÀŘ", true}, + } + for _, test := range tests { + actual := HasLowerCase(test.param) + if actual != test.expected { + t.Errorf("Expected HasLowerCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + +func TestHasUpperCase(t *testing.T) { + t.Parallel() + + var tests = []struct { + param string + expected bool + }{ + {"", true}, + {"abc123", false}, + {"abc", false}, + {"a b c", false}, + {"abcß", false}, + {"abcẞ", false}, + {"ABCẞ", true}, + {"tr竪s 端ber", false}, + {"fooBar", true}, + {"123ABC", true}, + {"ABC123", true}, + {"ABC", true}, + {"S T R", true}, + {"fooBar", true}, + {"abacaba123", false}, + {"FÒÔBÀŘ", true}, + {"fòôbàř", false}, + {"Fòôbàř", true}, + } + for 
_, test := range tests { + actual := HasUpperCase(test.param) + if actual != test.expected { + t.Errorf("Expected HasUpperCase(%q) to be %v, got %v", test.param, test.expected, actual) + } + } +} + func TestIsInt(t *testing.T) { t.Parallel() @@ -536,13 +604,12 @@ func TestIsInt(t *testing.T) { } } - func TestIsHash(t *testing.T) { t.Parallel() var tests = []struct { param string - algo string + algo string expected bool }{ {"3ca25ae354e192b26879f651a51d92aa8a34d8d3", "sha1", true}, @@ -576,19 +643,13 @@ func TestIsEmail(t *testing.T) { }{ {"", false}, {"foo@bar.com", true}, - {"x@x.x", true}, {"foo@bar.com.au", true}, {"foo+bar@bar.com", true}, {"foo@bar.coffee", true}, {"foo@bar.coffee..coffee", false}, - {"foo@bar.bar.coffee", true}, - {"foo@bar.中文网", true}, {"invalidemail@", false}, {"invalid.com", false}, {"@invalid.com", false}, - {"test|123@m端ller.com", true}, - {"hans@m端ller.com", true}, - {"hans.m端ller@test.com", true}, {"NathAn.daVIeS@DomaIn.cOM", true}, {"NATHAN.DAVIES@DOMAIN.CO.UK", true}, } @@ -744,7 +805,6 @@ func TestIsRequestURL(t *testing.T) { {"http://www.foo---bar.com/", true}, {"mailto:someone@example.com", true}, {"irc://irc.server.org/channel", true}, - {"irc://#channel@network", true}, {"/abs/test/dir", false}, {"./rel/test/dir", false}, } @@ -793,7 +853,6 @@ func TestIsRequestURI(t *testing.T) { {"http://www.foo---bar.com/", true}, {"mailto:someone@example.com", true}, {"irc://irc.server.org/channel", true}, - {"irc://#channel@network", true}, {"/abs/test/dir", true}, {"./rel/test/dir", false}, } @@ -2116,6 +2175,15 @@ type MissingValidationDeclarationStruct struct { Email string `valid:"required,email"` } +type FieldRequiredByDefault struct { + Email string `valid:"email"` +} + +type MultipleFieldsRequiredByDefault struct { + Url string `valid:"url"` + Email string `valid:"email"` +} + type FieldsRequiredByDefaultButExemptStruct struct { Name string `valid:"-"` Email string `valid:"email"` @@ -2152,6 +2220,46 @@ func 
TestValidateMissingValidationDeclarationStruct(t *testing.T) { SetFieldsRequiredByDefault(false) } +func TestFieldRequiredByDefault(t *testing.T) { + var tests = []struct { + param FieldRequiredByDefault + expected bool + }{ + {FieldRequiredByDefault{}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + +func TestMultipleFieldsRequiredByDefault(t *testing.T) { + var tests = []struct { + param MultipleFieldsRequiredByDefault + expected bool + }{ + {MultipleFieldsRequiredByDefault{}, false}, + } + SetFieldsRequiredByDefault(true) + for _, test := range tests { + actual, err := ValidateStruct(test.param) + if actual != test.expected { + t.Errorf("Expected ValidateStruct(%q) to be %v, got %v", test.param, test.expected, actual) + if err != nil { + t.Errorf("Got Error on ValidateStruct(%q): %s", test.param, err) + } + } + } + SetFieldsRequiredByDefault(false) +} + func TestFieldsRequiredByDefaultButExemptStruct(t *testing.T) { var tests = []struct { param FieldsRequiredByDefaultButExemptStruct @@ -2535,6 +2643,7 @@ func TestValidateStruct(t *testing.T) { {User{"John", "", "12345", 0, &Address{"Street", "123456789"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, {UserValid{"John", "john@yahoo.com", "123G#678", 20, &Address{"Street", "123456"}, []Address{{"Street", "123456"}, {"Street", "123456"}}}, true}, {UserValid{"John", "john!yahoo.com", "12345678", 20, &Address{"Street", "ABC456D89"}, []Address{}}, false}, + {UserValid{"John", "john@yahoo.com", "12345678", 20, &Address{"Street", "123456xxx"}, []Address{{"Street", "123456"}, {"Street", "123456"}}}, false}, {UserValid{"John", "john!yahoo.com", "12345678", 20, 
&Address{"Street", "ABC456D89"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, {UserValid{"John", "", "12345", 0, &Address{"Street", "123456789"}, []Address{{"Street", "ABC456D89"}, {"Street", "123456"}}}, false}, {nil, true}, @@ -2852,6 +2961,73 @@ func ExampleValidateStruct() { println(result) } +func TestValidateStructParamValidatorInt(t *testing.T) { + type Test1 struct { + Int int `valid:"range(1|10)"` + Int8 int8 `valid:"range(1|10)"` + Int16 int16 `valid:"range(1|10)"` + Int32 int32 `valid:"range(1|10)"` + Int64 int64 `valid:"range(1|10)"` + + Uint uint `valid:"range(1|10)"` + Uint8 uint8 `valid:"range(1|10)"` + Uint16 uint16 `valid:"range(1|10)"` + Uint32 uint32 `valid:"range(1|10)"` + Uint64 uint64 `valid:"range(1|10)"` + + Float32 float32 `valid:"range(1|10)"` + Float64 float64 `valid:"range(1|10)"` + } + test1Ok := &Test1{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5} + test1NotOk := &Test1{11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11} + + _, err := ValidateStruct(test1Ok) + if err != nil { + t.Errorf("Test failed: %s", err) + } + + _, err = ValidateStruct(test1NotOk) + if err == nil { + t.Errorf("Test failed: nil") + } + + type Test2 struct { + Int int `valid:"in(1|10)"` + Int8 int8 `valid:"in(1|10)"` + Int16 int16 `valid:"in(1|10)"` + Int32 int32 `valid:"in(1|10)"` + Int64 int64 `valid:"in(1|10)"` + + Uint uint `valid:"in(1|10)"` + Uint8 uint8 `valid:"in(1|10)"` + Uint16 uint16 `valid:"in(1|10)"` + Uint32 uint32 `valid:"in(1|10)"` + Uint64 uint64 `valid:"in(1|10)"` + + Float32 float32 `valid:"in(1|10)"` + Float64 float64 `valid:"in(1|10)"` + } + + test2Ok1 := &Test2{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + test2Ok2 := &Test2{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10} + test2NotOk := &Test2{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2} + + _, err = ValidateStruct(test2Ok1) + if err != nil { + t.Errorf("Test failed: %s", err) + } + + _, err = ValidateStruct(test2Ok2) + if err != nil { + t.Errorf("Test failed: %s", err) + } + + _, err = 
ValidateStruct(test2NotOk) + if err == nil { + t.Errorf("Test failed: nil") + } +} + func TestIsCIDR(t *testing.T) { t.Parallel() diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index f4cabd669..d7d14f8eb 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream { // is guaranteed to be within (Quantile±Epsilon). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + ƒ := func(s *stream, r float64) float64 { var m = math.MaxFloat64 var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) } if f < m { m = f @@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream { return newStream(ƒ) } +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + // Stream computes quantiles for a stream of float64s. It is not thread-safe by // design. Take care when using across multiple goroutines. 
type Stream struct { diff --git a/vendor/github.com/containerd/continuity/fs/fstest/file.go b/vendor/github.com/containerd/continuity/fs/fstest/file.go index 9d614ee98..a3b491b88 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/file.go +++ b/vendor/github.com/containerd/continuity/fs/fstest/file.go @@ -2,6 +2,7 @@ package fstest import ( "io/ioutil" + "net" "os" "path/filepath" "time" @@ -107,6 +108,18 @@ func Link(oldname, newname string) Applier { // } //} +func CreateSocket(name string, perm os.FileMode) Applier { + return applyFn(func(root string) error { + fullPath := filepath.Join(root, name) + ln, err := net.Listen("unix", fullPath) + if err != nil { + return err + } + defer ln.Close() + return os.Chmod(fullPath, perm) + }) +} + // Apply returns a new applier from the given appliers func Apply(appliers ...Applier) Applier { return applyFn(func(root string) error { diff --git a/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go b/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go index a26e8bbd8..2e1e6a7ce 100644 --- a/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go +++ b/vendor/github.com/containerd/continuity/fs/fstest/testsuite.go @@ -74,6 +74,7 @@ var ( Symlink("libnothing.so", "/usr/local/lib/libnothing.so.2"), CreateDir("/home", 0755), CreateDir("/home/derek", 0700), + CreateDir("/var/run/socket", 0700), ) // basicTest covers basic operations diff --git a/vendor/github.com/docker/docker/.github/CODEOWNERS b/vendor/github.com/docker/docker/.github/CODEOWNERS index ea7ae0518..908185496 100644 --- a/vendor/github.com/docker/docker/.github/CODEOWNERS +++ b/vendor/github.com/docker/docker/.github/CODEOWNERS @@ -3,7 +3,7 @@ # # KEEP THIS FILE SORTED. Order is important. Last match takes precedence. 
-builder/** @dnephin @tonistiigi +builder/** @tonistiigi client/** @dnephin contrib/mkimage/** @tianon daemon/graphdriver/devmapper/** @rhvgoyal @@ -12,10 +12,9 @@ daemon/graphdriver/overlay/** @dmcgowan daemon/graphdriver/overlay2/** @dmcgowan daemon/graphdriver/windows/** @johnstep @jhowardmsft daemon/logger/awslogs/** @samuelkarp -hack/** @dnephin @tianon +hack/** @tianon hack/integration-cli-on-swarm/** @AkihiroSuda -integration-cli/** @dnephin @vdemeester -integration/** @dnephin @vdemeester -pkg/testutil/** @dnephin +integration-cli/** @vdemeester +integration/** @vdemeester plugin/** @cpuguy83 project/** @thaJeztah diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile index 63e2dc3f0..8bc546bd9 100644 --- a/vendor/github.com/docker/docker/Dockerfile +++ b/vendor/github.com/docker/docker/Dockerfile @@ -44,9 +44,7 @@ FROM base AS criu # Install CRIU for checkpoint/restore support ENV CRIU_VERSION 3.6 # Install dependancy packages specific to criu -RUN case $(uname -m) in \ - x86_64) \ - apt-get update && apt-get install -y \ +RUN apt-get update && apt-get install -y \ libnet-dev \ libprotobuf-c0-dev \ libprotobuf-dev \ @@ -59,13 +57,7 @@ RUN case $(uname -m) in \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ - && make PREFIX=/opt/criu install-criu ;\ - ;; \ - armv7l|aarch64|ppc64le|s390x) \ - mkdir -p /opt/criu; \ - ;; \ - esac - + && make PREFIX=/opt/criu install-criu FROM base AS registry # Install two versions of the registry. 
The first is an older version that @@ -80,8 +72,8 @@ RUN set -x \ && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && case $(uname -m) in \ - x86_64|ppc64le|s390x) \ + && case $(dpkg --print-architecture) in \ + amd64|ppc64*|s390x) \ (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \ GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ @@ -91,21 +83,6 @@ RUN set -x \ -FROM base AS notary -# Install notary and notary-server -ENV NOTARY_VERSION v0.5.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -buildmode=pie -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - - - FROM base AS docker-py # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f @@ -248,7 +225,6 @@ COPY --from=containerd /opt/containerd/ /usr/local/bin/ COPY --from=proxy /opt/proxy/ /usr/local/bin/ COPY --from=dockercli /opt/dockercli /usr/local/cli COPY --from=registry /usr/local/bin/registry* /usr/local/bin/ -COPY --from=notary /usr/local/bin/notary* /usr/local/bin/ COPY --from=criu /opt/criu/ /usr/local/ COPY --from=docker-py /docker-py /docker-py # TODO: This 
is for the docker-py tests, which shouldn't really be needed for diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS index e9a5566ca..8f76e7101 100644 --- a/vendor/github.com/docker/docker/MAINTAINERS +++ b/vendor/github.com/docker/docker/MAINTAINERS @@ -71,18 +71,15 @@ # - close an issue or pull request when it's inappropriate or off-topic people = [ - "aboch", "alexellis", "andrewhsu", "anonymuse", "chanwit", - "ehazlett", "fntlnz", "gianarb", - "mgoelzer", "programmerq", "rheinwein", - "ripcurld0", + "ripcurld", "thajeztah" ] @@ -242,11 +239,6 @@ Email = "aaron.lehmann@docker.com" GitHub = "aaronlehmann" - [people.aboch] - Name = "Alessandro Boch" - Email = "aboch@docker.com" - GitHub = "aboch" - [people.alexellis] Name = "Alex Ellis" Email = "alexellis2@gmail.com" @@ -382,11 +374,6 @@ Email = "madhu@docker.com" GitHub = "mavenugo" - [people.mgoelzer] - Name = "Mike Goelzer" - Email = "mike.goelzer@docker.com" - GitHub = "mgoelzer" - [people.mhbauer] Name = "Morgan Bauer" Email = "mbauer@us.ibm.com" @@ -422,10 +409,10 @@ Email = "laura@codeship.com" GitHub = "rheinwein" - [people.ripcurld0] + [people.ripcurld] Name = "Boaz Shuster" Email = "ripcurld.github@gmail.com" - GitHub = "ripcurld0" + GitHub = "ripcurld" [people.runcom] Name = "Antonio Murdaca" diff --git a/vendor/github.com/docker/docker/TESTING.md b/vendor/github.com/docker/docker/TESTING.md index 20f7c9254..1231e1c5f 100644 --- a/vendor/github.com/docker/docker/TESTING.md +++ b/vendor/github.com/docker/docker/TESTING.md @@ -8,11 +8,11 @@ questions you may have as an aspiring Moby contributor. Moby has two test suites (and one legacy test suite): * Unit tests - use standard `go test` and - [testify](https://github.com/stretchr/testify) assertions. They are located in + [gotestyourself/assert](https://godoc.org/github.com/gotestyourself/gotestyourself/assert) assertions. They are located in the package they test. 
Unit tests should be fast and test only their own package. * API integration tests - use standard `go test` and - [testify](https://github.com/stretchr/testify) assertions. They are located in + [gotestyourself/assert](https://godoc.org/github.com/gotestyourself/gotestyourself/assert) assertions. They are located in `./integration/` directories, where `component` is: container, image, volume, etc. These tests perform HTTP requests to an API endpoint and check the HTTP response and daemon state after the call. diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug_test.go b/vendor/github.com/docker/docker/api/server/middleware/debug_test.go index a467c4a44..cc227b324 100644 --- a/vendor/github.com/docker/docker/api/server/middleware/debug_test.go +++ b/vendor/github.com/docker/docker/api/server/middleware/debug_test.go @@ -3,7 +3,8 @@ package middleware // import "github.com/docker/docker/api/server/middleware" import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestMaskSecretKeys(t *testing.T) { @@ -53,6 +54,6 @@ func TestMaskSecretKeys(t *testing.T) { for _, testcase := range tests { maskSecretKeys(testcase.input, testcase.path) - assert.Equal(t, testcase.expected, testcase.input) + assert.Check(t, is.DeepEqual(testcase.expected, testcase.input)) } } diff --git a/vendor/github.com/docker/docker/api/server/middleware/version_test.go b/vendor/github.com/docker/docker/api/server/middleware/version_test.go index 37d22b5c4..f426acf0a 100644 --- a/vendor/github.com/docker/docker/api/server/middleware/version_test.go +++ b/vendor/github.com/docker/docker/api/server/middleware/version_test.go @@ -7,7 +7,8 @@ import ( "testing" "github.com/docker/docker/api/server/httputils" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" 
"golang.org/x/net/context" ) @@ -17,7 +18,7 @@ func TestVersionMiddlewareVersion(t *testing.T) { expectedVersion := defaultVersion handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { v := httputils.VersionFromContext(ctx) - assert.Equal(t, expectedVersion, v) + assert.Check(t, is.Equal(expectedVersion, v)) return nil } @@ -56,9 +57,9 @@ func TestVersionMiddlewareVersion(t *testing.T) { err := h(ctx, resp, req, map[string]string{"version": test.reqVersion}) if test.errString != "" { - assert.EqualError(t, err, test.errString) + assert.Check(t, is.Error(err, test.errString)) } else { - assert.NoError(t, err) + assert.Check(t, err) } } } @@ -66,7 +67,7 @@ func TestVersionMiddlewareVersion(t *testing.T) { func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) { handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { v := httputils.VersionFromContext(ctx) - assert.NotEmpty(t, v) + assert.Check(t, len(v) != 0) return nil } @@ -81,11 +82,11 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) { vars := map[string]string{"version": "0.1"} err := h(ctx, resp, req, vars) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) hdr := resp.Result().Header - assert.Contains(t, hdr.Get("Server"), "Docker/"+defaultVersion) - assert.Contains(t, hdr.Get("Server"), runtime.GOOS) - assert.Equal(t, hdr.Get("API-Version"), defaultVersion) - assert.Equal(t, hdr.Get("OSType"), runtime.GOOS) + assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion)) + assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS)) + assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion)) + assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS)) } diff --git a/vendor/github.com/docker/docker/api/types/filters/parse_test.go b/vendor/github.com/docker/docker/api/types/filters/parse_test.go index b54ffa66e..fbd9ae4fb 100644 --- 
a/vendor/github.com/docker/docker/api/types/filters/parse_test.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse_test.go @@ -4,8 +4,8 @@ import ( "errors" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestParseArgs(t *testing.T) { @@ -22,10 +22,10 @@ func TestParseArgs(t *testing.T) { for i := range flagArgs { args, err = ParseFlag(flagArgs[i], args) - require.NoError(t, err) + assert.NilError(t, err) } - assert.Len(t, args.Get("created"), 1) - assert.Len(t, args.Get("image.name"), 2) + assert.Check(t, is.Len(args.Get("created"), 1)) + assert.Check(t, is.Len(args.Get("image.name"), 2)) } func TestParseArgsEdgeCase(t *testing.T) { @@ -231,7 +231,7 @@ func TestArgsMatch(t *testing.T) { } for args, field := range matches { - assert.True(t, args.Match(field, source), + assert.Check(t, args.Match(field, source), "Expected field %s to match %s", field, source) } @@ -255,8 +255,7 @@ func TestArgsMatch(t *testing.T) { } for args, field := range differs { - assert.False(t, args.Match(field, source), - "Expected field %s to not match %s", field, source) + assert.Check(t, !args.Match(field, source), "Expected field %s to not match %s", field, source) } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go b/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go index c46dd7d49..ae00e3b65 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/buildargs_test.go @@ -2,9 +2,11 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "bytes" + "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func strPtr(source string) *string { @@ 
-39,7 +41,7 @@ func TestGetAllAllowed(t *testing.T) { "ArgFromMeta": "frommeta1", "ArgFromMetaOverridden": "fromdockerfile3", } - assert.Equal(t, expected, all) + assert.Check(t, is.DeepEqual(expected, all)) } func TestGetAllMeta(t *testing.T) { @@ -61,7 +63,7 @@ func TestGetAllMeta(t *testing.T) { "ArgOverriddenByOptions": "fromopt2", "ArgNoDefaultInMetaFromOptions": "fromopt3", } - assert.Equal(t, expected, all) + assert.Check(t, is.DeepEqual(expected, all)) } func TestWarnOnUnusedBuildArgs(t *testing.T) { @@ -77,10 +79,10 @@ func TestWarnOnUnusedBuildArgs(t *testing.T) { buffer := new(bytes.Buffer) buildArgs.WarnOnUnusedBuildArgs(buffer) out := buffer.String() - assert.NotContains(t, out, "ThisArgIsUsed") - assert.NotContains(t, out, "HTTPS_PROXY") - assert.NotContains(t, out, "HTTP_PROXY") - assert.Contains(t, out, "ThisArgIsNotUsed") + assert.Assert(t, !strings.Contains(out, "ThisArgIsUsed"), out) + assert.Assert(t, !strings.Contains(out, "HTTPS_PROXY"), out) + assert.Assert(t, !strings.Contains(out, "HTTP_PROXY"), out) + assert.Check(t, is.Contains(out, "ThisArgIsNotUsed")) } func TestIsUnreferencedBuiltin(t *testing.T) { @@ -93,8 +95,8 @@ func TestIsUnreferencedBuiltin(t *testing.T) { buildArgs.AddArg("ThisArgIsUsed", nil) buildArgs.AddArg("HTTPS_PROXY", nil) - assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed")) - assert.True(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed")) - assert.True(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY")) - assert.False(t, buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY")) + assert.Check(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsUsed")) + assert.Check(t, buildArgs.IsReferencedOrNotBuiltin("ThisArgIsNotUsed")) + assert.Check(t, buildArgs.IsReferencedOrNotBuiltin("HTTPS_PROXY")) + assert.Check(t, !buildArgs.IsReferencedOrNotBuiltin("HTTP_PROXY")) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go index 
d20bc0403..d328235a1 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "io/ioutil" - "runtime" "strings" "time" @@ -104,18 +103,12 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) ( source = src } - os := runtime.GOOS - optionsPlatform := system.ParsePlatform(config.Options.Platform) - if dockerfile.OS != "" { - if optionsPlatform.OS != "" && optionsPlatform.OS != dockerfile.OS { - return nil, fmt.Errorf("invalid platform") - } - os = dockerfile.OS - } else if optionsPlatform.OS != "" { - os = optionsPlatform.OS + os := "" + apiPlatform := system.ParsePlatform(config.Options.Platform) + if apiPlatform.OS != "" { + os = apiPlatform.OS } config.Options.Platform = os - dockerfile.OS = os builderOptions := builderOptions{ Options: config.Options, diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go index a3a1f122f..6c73b6cce 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_test.go @@ -5,13 +5,14 @@ import ( "testing" "github.com/docker/docker/builder/dockerfile/parser" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestAddNodesForLabelOption(t *testing.T) { dockerfile := "FROM scratch" result, err := parser.Parse(strings.NewReader(dockerfile)) - assert.NoError(t, err) + assert.Check(t, err) labels := map[string]string{ "org.e": "cli-e", @@ -27,8 +28,8 @@ func TestAddNodesForLabelOption(t *testing.T) { "FROM scratch", `LABEL "org.a"='cli-a' "org.b"='cli-b' "org.c"='cli-c' "org.d"='cli-d' "org.e"='cli-e'`, } - assert.Len(t, nodes.Children, 2) + assert.Check(t, is.Len(nodes.Children, 2)) for i, v := range 
nodes.Children { - assert.Equal(t, expected[i], v.Original) + assert.Check(t, is.Equal(expected[i], v.Original)) } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go index da8e0711a..f2f895387 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_test.go @@ -5,8 +5,9 @@ import ( "testing" "github.com/docker/docker/pkg/containerfs" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/assert" ) func TestIsExistingDirectory(t *testing.T) { @@ -39,10 +40,10 @@ func TestIsExistingDirectory(t *testing.T) { for _, testcase := range testcases { result, err := isExistingDirectory(©Endpoint{driver: containerfs.NewLocalDriver(), path: testcase.path}) - if !assert.NoError(t, err) { + if !assert.Check(t, err) { continue } - assert.Equal(t, testcase.expected, result, testcase.doc) + assert.Check(t, is.Equal(testcase.expected, result), testcase.doc) } } @@ -142,6 +143,6 @@ func TestGetFilenameForDownload(t *testing.T) { resp.Header.Add("Content-Disposition", testcase.disposition) } filename := getFilenameForDownload(testcase.path, &resp) - assert.Equal(t, testcase.expected, filename) + assert.Check(t, is.Equal(testcase.expected, filename)) } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go index 99d4aa627..991c433b2 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -145,14 +145,17 @@ func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error imageRefOrID = stage.Image localOnly = true } - return d.builder.imageSources.Get(imageRefOrID, localOnly) + 
return d.builder.imageSources.Get(imageRefOrID, localOnly, d.state.operatingSystem) } -// FROM imagename[:tag | @digest] [AS build-stage-name] +// FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name] // func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { d.builder.imageProber.Reset() - image, err := d.getFromImage(d.shlex, cmd.BaseName) + if err := system.ValidatePlatform(&cmd.Platform); err != nil { + return err + } + image, err := d.getFromImage(d.shlex, cmd.BaseName, cmd.Platform.OS) if err != nil { return err } @@ -210,20 +213,41 @@ func (d *dispatchRequest) getExpandedImageName(shlex *shell.Lex, name string) (s } return name, nil } -func (d *dispatchRequest) getImageOrStage(name string) (builder.Image, error) { + +// getOsFromFlagsAndStage calculates the operating system if we need to pull an image. +// stagePlatform contains the value supplied by optional `--platform=` on +// a current FROM statement. b.builder.options.Platform contains the operating +// system part of the optional flag passed in the API call (or CLI flag +// through `docker build --platform=...`). Precedence is for an explicit +// platform indication in the FROM statement. +func (d *dispatchRequest) getOsFromFlagsAndStage(stageOS string) string { + switch { + case stageOS != "": + return stageOS + case d.builder.options.Platform != "": + // Note this is API "platform", but by this point, as the daemon is not + // multi-arch aware yet, it is guaranteed to only hold the OS part here. + return d.builder.options.Platform + default: + return runtime.GOOS + } +} + +func (d *dispatchRequest) getImageOrStage(name string, stageOS string) (builder.Image, error) { var localOnly bool if im, ok := d.stages.getByName(name); ok { name = im.Image localOnly = true } + os := d.getOsFromFlagsAndStage(stageOS) + // Windows cannot support a container with no base image unless it is LCOW. 
if name == api.NoBaseImageSpecifier { imageImage := &image.Image{} imageImage.OS = runtime.GOOS if runtime.GOOS == "windows" { - optionsOS := system.ParsePlatform(d.builder.options.Platform).OS - switch optionsOS { + switch os { case "windows", "": return nil, errors.New("Windows does not support FROM scratch") case "linux": @@ -232,23 +256,23 @@ func (d *dispatchRequest) getImageOrStage(name string) (builder.Image, error) { } imageImage.OS = "linux" default: - return nil, errors.Errorf("operating system %q is not supported", optionsOS) + return nil, errors.Errorf("operating system %q is not supported", os) } } return builder.Image(imageImage), nil } - imageMount, err := d.builder.imageSources.Get(name, localOnly) + imageMount, err := d.builder.imageSources.Get(name, localOnly, os) if err != nil { return nil, err } return imageMount.Image(), nil } -func (d *dispatchRequest) getFromImage(shlex *shell.Lex, name string) (builder.Image, error) { +func (d *dispatchRequest) getFromImage(shlex *shell.Lex, name string, stageOS string) (builder.Image, error) { name, err := d.getExpandedImageName(shlex, name) if err != nil { return nil, err } - return d.getImageOrStage(name) + return d.getImageOrStage(name, stageOS) } func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { @@ -264,8 +288,7 @@ func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { runConfig := d.state.runConfig var err error - baseImageOS := system.ParsePlatform(d.state.operatingSystem).OS - runConfig.WorkingDir, err = normalizeWorkdir(baseImageOS, runConfig.WorkingDir, c.Path) + runConfig.WorkingDir, err = normalizeWorkdir(d.state.operatingSystem, runConfig.WorkingDir, c.Path) if err != nil { return err } @@ -281,7 +304,7 @@ func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { } comment := "WORKDIR " + runConfig.WorkingDir - runConfigWithCommentCmd := 
copyRunConfig(runConfig, withCmdCommentString(comment, baseImageOS)) + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, d.state.operatingSystem)) containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd) if err != nil || containerID == "" { return err @@ -397,8 +420,7 @@ func prependEnvOnCmd(buildArgs *buildArgs, buildArgVars []string, cmd strslice.S // func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { runConfig := d.state.runConfig - optionsOS := system.ParsePlatform(d.builder.options.Platform).OS - cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, optionsOS) + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) runConfig.Cmd = cmd // set config as already being escaped, this prevents double escaping on windows runConfig.ArgsEscaped = true @@ -441,8 +463,7 @@ func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) // func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error { runConfig := d.state.runConfig - optionsOS := system.ParsePlatform(d.builder.options.Platform).OS - cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, optionsOS) + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) runConfig.Entrypoint = cmd if !d.state.cmdSet { runConfig.Cmd = nil diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go index 6d52e7e61..6ddde82d4 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_test.go @@ -16,8 +16,8 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/pkg/system" "github.com/docker/go-connections/nat" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" ) func newBuilderWithMockBackend() *Builder { @@ -49,13 +49,13 @@ func TestEnv2Variables(t *testing.T) { }, } err := dispatch(sb, envCommand) - require.NoError(t, err) + assert.NilError(t, err) expected := []string{ "var1=val1", "var2=val2", } - assert.Equal(t, expected, sb.state.runConfig.Env) + assert.Check(t, is.DeepEqual(expected, sb.state.runConfig.Env)) } func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { @@ -68,12 +68,12 @@ func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { }, } err := dispatch(sb, envCommand) - require.NoError(t, err) + assert.NilError(t, err) expected := []string{ "var1=val1", "var2=fromenv", } - assert.Equal(t, expected, sb.state.runConfig.Env) + assert.Check(t, is.DeepEqual(expected, sb.state.runConfig.Env)) } func TestMaintainer(t *testing.T) { @@ -82,8 +82,8 @@ func TestMaintainer(t *testing.T) { sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults()) cmd := &instructions.MaintainerCommand{Maintainer: maintainerEntry} err := dispatch(sb, cmd) - require.NoError(t, err) - assert.Equal(t, maintainerEntry, sb.state.maintainer) + assert.NilError(t, err) + assert.Check(t, is.Equal(maintainerEntry, sb.state.maintainer)) } func TestLabel(t *testing.T) { @@ -98,10 +98,10 @@ func TestLabel(t *testing.T) { }, } err := dispatch(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) - require.Contains(t, sb.state.runConfig.Labels, labelName) - assert.Equal(t, sb.state.runConfig.Labels[labelName], labelValue) + assert.Assert(t, is.Contains(sb.state.runConfig.Labels, labelName)) + assert.Check(t, is.Equal(sb.state.runConfig.Labels[labelName], labelValue)) } func TestFromScratch(t *testing.T) { @@ -113,22 +113,22 @@ func TestFromScratch(t *testing.T) { err := initializeStage(sb, cmd) if runtime.GOOS == "windows" && !system.LCOWSupported() { - assert.EqualError(t, err, "Windows does not support FROM scratch") + assert.Check(t, 
is.Error(err, "Windows does not support FROM scratch")) return } - require.NoError(t, err) - assert.True(t, sb.state.hasFromImage()) - assert.Equal(t, "", sb.state.imageID) + assert.NilError(t, err) + assert.Check(t, sb.state.hasFromImage()) + assert.Check(t, is.Equal("", sb.state.imageID)) expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS) - assert.Equal(t, []string{expected}, sb.state.runConfig.Env) + assert.Check(t, is.DeepEqual([]string{expected}, sb.state.runConfig.Env)) } func TestFromWithArg(t *testing.T) { tag, expected := ":sometag", "expectedthisid" getImage := func(name string) (builder.Image, builder.ROLayer, error) { - assert.Equal(t, "alpine"+tag, name) + assert.Check(t, is.Equal("alpine"+tag, name)) return &mockImage{id: "expectedthisid"}, nil, nil } b := newBuilderWithMockBackend() @@ -146,21 +146,21 @@ func TestFromWithArg(t *testing.T) { err := processMetaArg(metaArg, shell.NewLex('\\'), args) sb := newDispatchRequest(b, '\\', nil, args, newStagesBuildResults()) - require.NoError(t, err) + assert.NilError(t, err) err = initializeStage(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, expected, sb.state.imageID) - assert.Equal(t, expected, sb.state.baseImage.ImageID()) - assert.Len(t, sb.state.buildArgs.GetAllAllowed(), 0) - assert.Len(t, sb.state.buildArgs.GetAllMeta(), 1) + assert.Check(t, is.Equal(expected, sb.state.imageID)) + assert.Check(t, is.Equal(expected, sb.state.baseImage.ImageID())) + assert.Check(t, is.Len(sb.state.buildArgs.GetAllAllowed(), 0)) + assert.Check(t, is.Len(sb.state.buildArgs.GetAllMeta(), 1)) } func TestFromWithUndefinedArg(t *testing.T) { tag, expected := "sometag", "expectedthisid" getImage := func(name string) (builder.Image, builder.ROLayer, error) { - assert.Equal(t, "alpine", name) + assert.Check(t, is.Equal("alpine", name)) return &mockImage{id: "expectedthisid"}, nil, nil } b := newBuilderWithMockBackend() @@ -173,8 +173,8 @@ func TestFromWithUndefinedArg(t *testing.T) { 
BaseName: "alpine${THETAG}", } err := initializeStage(sb, cmd) - require.NoError(t, err) - assert.Equal(t, expected, sb.state.imageID) + assert.NilError(t, err) + assert.Check(t, is.Equal(expected, sb.state.imageID)) } func TestFromMultiStageWithNamedStage(t *testing.T) { @@ -185,13 +185,13 @@ func TestFromMultiStageWithNamedStage(t *testing.T) { firstSB := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), previousResults) secondSB := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), previousResults) err := initializeStage(firstSB, firstFrom) - require.NoError(t, err) - assert.True(t, firstSB.state.hasFromImage()) + assert.NilError(t, err) + assert.Check(t, firstSB.state.hasFromImage()) previousResults.indexed["base"] = firstSB.state.runConfig previousResults.flat = append(previousResults.flat, firstSB.state.runConfig) err = initializeStage(secondSB, secondFrom) - require.NoError(t, err) - assert.True(t, secondSB.state.hasFromImage()) + assert.NilError(t, err) + assert.Check(t, secondSB.state.hasFromImage()) } func TestOnbuild(t *testing.T) { @@ -201,13 +201,14 @@ func TestOnbuild(t *testing.T) { Expression: "ADD . /app/src", } err := dispatch(sb, cmd) - require.NoError(t, err) - assert.Equal(t, "ADD . /app/src", sb.state.runConfig.OnBuild[0]) + assert.NilError(t, err) + assert.Check(t, is.Equal("ADD . 
/app/src", sb.state.runConfig.OnBuild[0])) } func TestWorkdir(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} workingDir := "/app" if runtime.GOOS == "windows" { workingDir = "C:\\app" @@ -217,13 +218,14 @@ func TestWorkdir(t *testing.T) { } err := dispatch(sb, cmd) - require.NoError(t, err) - assert.Equal(t, workingDir, sb.state.runConfig.WorkingDir) + assert.NilError(t, err) + assert.Check(t, is.Equal(workingDir, sb.state.runConfig.WorkingDir)) } func TestCmd(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} command := "./executable" cmd := &instructions.CmdCommand{ @@ -233,7 +235,7 @@ func TestCmd(t *testing.T) { }, } err := dispatch(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) var expectedCommand strslice.StrSlice if runtime.GOOS == "windows" { @@ -242,8 +244,8 @@ func TestCmd(t *testing.T) { expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command)) } - assert.Equal(t, expectedCommand, sb.state.runConfig.Cmd) - assert.True(t, sb.state.cmdSet) + assert.Check(t, is.DeepEqual(expectedCommand, sb.state.runConfig.Cmd)) + assert.Check(t, sb.state.cmdSet) } func TestHealthcheckNone(t *testing.T) { @@ -255,10 +257,10 @@ func TestHealthcheckNone(t *testing.T) { }, } err := dispatch(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) - require.NotNil(t, sb.state.runConfig.Healthcheck) - assert.Equal(t, []string{"NONE"}, sb.state.runConfig.Healthcheck.Test) + assert.Assert(t, sb.state.runConfig.Healthcheck != nil) + assert.Check(t, is.DeepEqual([]string{"NONE"}, sb.state.runConfig.Healthcheck.Test)) } func TestHealthcheckCmd(t *testing.T) { @@ -272,15 +274,16 @@ func TestHealthcheckCmd(t *testing.T) { }, } err := dispatch(sb, cmd) - require.NoError(t, 
err) + assert.NilError(t, err) - require.NotNil(t, sb.state.runConfig.Healthcheck) - assert.Equal(t, expectedTest, sb.state.runConfig.Healthcheck.Test) + assert.Assert(t, sb.state.runConfig.Healthcheck != nil) + assert.Check(t, is.DeepEqual(expectedTest, sb.state.runConfig.Healthcheck.Test)) } func TestEntrypoint(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} entrypointCmd := "/usr/sbin/nginx" cmd := &instructions.EntrypointCommand{ @@ -290,8 +293,8 @@ func TestEntrypoint(t *testing.T) { }, } err := dispatch(sb, cmd) - require.NoError(t, err) - require.NotNil(t, sb.state.runConfig.Entrypoint) + assert.NilError(t, err) + assert.Assert(t, sb.state.runConfig.Entrypoint != nil) var expectedEntrypoint strslice.StrSlice if runtime.GOOS == "windows" { @@ -299,7 +302,7 @@ func TestEntrypoint(t *testing.T) { } else { expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd)) } - assert.Equal(t, expectedEntrypoint, sb.state.runConfig.Entrypoint) + assert.Check(t, is.DeepEqual(expectedEntrypoint, sb.state.runConfig.Entrypoint)) } func TestExpose(t *testing.T) { @@ -311,14 +314,14 @@ func TestExpose(t *testing.T) { Ports: []string{exposedPort}, } err := dispatch(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) - require.NotNil(t, sb.state.runConfig.ExposedPorts) - require.Len(t, sb.state.runConfig.ExposedPorts, 1) + assert.Assert(t, sb.state.runConfig.ExposedPorts != nil) + assert.Assert(t, is.Len(sb.state.runConfig.ExposedPorts, 1)) portsMapping, err := nat.ParsePortSpec(exposedPort) - require.NoError(t, err) - assert.Contains(t, sb.state.runConfig.ExposedPorts, portsMapping[0].Port) + assert.NilError(t, err) + assert.Check(t, is.Contains(sb.state.runConfig.ExposedPorts, portsMapping[0].Port)) } func TestUser(t *testing.T) { @@ -329,8 +332,8 @@ func TestUser(t *testing.T) { User: "test", } err := 
dispatch(sb, cmd) - require.NoError(t, err) - assert.Equal(t, "test", sb.state.runConfig.User) + assert.NilError(t, err) + assert.Check(t, is.Equal("test", sb.state.runConfig.User)) } func TestVolume(t *testing.T) { @@ -343,10 +346,10 @@ func TestVolume(t *testing.T) { Volumes: []string{exposedVolume}, } err := dispatch(sb, cmd) - require.NoError(t, err) - require.NotNil(t, sb.state.runConfig.Volumes) - assert.Len(t, sb.state.runConfig.Volumes, 1) - assert.Contains(t, sb.state.runConfig.Volumes, exposedVolume) + assert.NilError(t, err) + assert.Assert(t, sb.state.runConfig.Volumes != nil) + assert.Check(t, is.Len(sb.state.runConfig.Volumes, 1)) + assert.Check(t, is.Contains(sb.state.runConfig.Volumes, exposedVolume)) } func TestStopSignal(t *testing.T) { @@ -356,14 +359,15 @@ func TestStopSignal(t *testing.T) { } b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults()) + sb.state.baseImage = &mockImage{} signal := "SIGKILL" cmd := &instructions.StopSignalCommand{ Signal: signal, } err := dispatch(sb, cmd) - require.NoError(t, err) - assert.Equal(t, signal, sb.state.runConfig.StopSignal) + assert.NilError(t, err) + assert.Check(t, is.Equal(signal, sb.state.runConfig.StopSignal)) } func TestArg(t *testing.T) { @@ -374,10 +378,10 @@ func TestArg(t *testing.T) { argVal := "bar" cmd := &instructions.ArgCommand{Key: argName, Value: &argVal} err := dispatch(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) expected := map[string]string{argName: argVal} - assert.Equal(t, expected, sb.state.buildArgs.GetAllAllowed()) + assert.Check(t, is.DeepEqual(expected, sb.state.buildArgs.GetAllAllowed())) } func TestShell(t *testing.T) { @@ -388,10 +392,10 @@ func TestShell(t *testing.T) { cmd := &instructions.ShellCommand{Shell: strslice.StrSlice{shellCmd}} err := dispatch(sb, cmd) - require.NoError(t, err) + assert.NilError(t, err) expectedShell := strslice.StrSlice([]string{shellCmd}) - 
assert.Equal(t, expectedShell, sb.state.runConfig.Shell) + assert.Check(t, is.DeepEqual(expectedShell, sb.state.runConfig.Shell)) } func TestPrependEnvOnCmd(t *testing.T) { @@ -403,7 +407,7 @@ func TestPrependEnvOnCmd(t *testing.T) { cmdWithEnv := prependEnvOnCmd(buildArgs, args, cmd) expected := strslice.StrSlice([]string{ "|3", "NO_PROXY=YA", "args=not", "sorted=nope", "foo", "bar"}) - assert.Equal(t, expected, cmdWithEnv) + assert.Check(t, is.DeepEqual(expected, cmdWithEnv)) } func TestRunWithBuildArgs(t *testing.T) { @@ -422,8 +426,8 @@ func TestRunWithBuildArgs(t *testing.T) { imageCache := &mockImageCache{ getCacheFunc: func(parentID string, cfg *container.Config) (string, error) { // Check the runConfig.Cmd sent to probeCache() - assert.Equal(t, cachedCmd, cfg.Cmd) - assert.Equal(t, strslice.StrSlice(nil), cfg.Entrypoint) + assert.Check(t, is.DeepEqual(cachedCmd, cfg.Cmd)) + assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Entrypoint)) return "", nil }, } @@ -441,21 +445,21 @@ func TestRunWithBuildArgs(t *testing.T) { } mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) { // Check the runConfig.Cmd sent to create() - assert.Equal(t, cmdWithShell, config.Config.Cmd) - assert.Contains(t, config.Config.Env, "one=two") - assert.Equal(t, strslice.StrSlice{""}, config.Config.Entrypoint) + assert.Check(t, is.DeepEqual(cmdWithShell, config.Config.Cmd)) + assert.Check(t, is.Contains(config.Config.Env, "one=two")) + assert.Check(t, is.DeepEqual(strslice.StrSlice{""}, config.Config.Entrypoint)) return container.ContainerCreateCreatedBody{ID: "12345"}, nil } mockBackend.commitFunc = func(cfg backend.CommitConfig) (image.ID, error) { // Check the runConfig.Cmd sent to commit() - assert.Equal(t, origCmd, cfg.Config.Cmd) - assert.Equal(t, cachedCmd, cfg.ContainerConfig.Cmd) - assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint) + assert.Check(t, is.DeepEqual(origCmd, cfg.Config.Cmd)) + 
assert.Check(t, is.DeepEqual(cachedCmd, cfg.ContainerConfig.Cmd)) + assert.Check(t, is.DeepEqual(strslice.StrSlice(nil), cfg.Config.Entrypoint)) return "", nil } from := &instructions.Stage{BaseName: "abcdef"} err := initializeStage(sb, from) - require.NoError(t, err) + assert.NilError(t, err) sb.state.buildArgs.AddArg("one", strPtr("two")) run := &instructions.RunCommand{ ShellDependantCmdLine: instructions.ShellDependantCmdLine{ @@ -463,8 +467,8 @@ func TestRunWithBuildArgs(t *testing.T) { PrependShell: true, }, } - require.NoError(t, dispatch(sb, run)) + assert.NilError(t, dispatch(sb, run)) // Check that runConfig.Cmd has not been modified by run - assert.Equal(t, origCmd, sb.state.runConfig.Cmd) + assert.Check(t, is.DeepEqual(origCmd, sb.state.runConfig.Cmd)) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go index 74264faf2..0f7684508 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -37,8 +37,7 @@ import ( func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { if c, ok := cmd.(instructions.PlatformSpecific); ok { - optionsOS := system.ParsePlatform(d.builder.options.Platform).OS - err := c.CheckPlatform(optionsOS) + err := c.CheckPlatform(d.state.operatingSystem) if err != nil { return errdefs.InvalidParameter(err) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go index 0d4af384a..fd2b94239 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -6,13 +6,12 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" dockerimage "github.com/docker/docker/image" - "github.com/docker/docker/pkg/system" 
"github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context" ) -type getAndMountFunc func(string, bool) (builder.Image, builder.ROLayer, error) +type getAndMountFunc func(string, bool, string) (builder.Image, builder.ROLayer, error) // imageSources mounts images and provides a cache for mounted images. It tracks // all images so they can be unmounted at the end of the build. @@ -23,7 +22,7 @@ type imageSources struct { } func newImageSources(ctx context.Context, options builderOptions) *imageSources { - getAndMount := func(idOrRef string, localOnly bool) (builder.Image, builder.ROLayer, error) { + getAndMount := func(idOrRef string, localOnly bool, osForPull string) (builder.Image, builder.ROLayer, error) { pullOption := backend.PullOptionNoPull if !localOnly { if options.Options.PullParent { @@ -32,12 +31,11 @@ func newImageSources(ctx context.Context, options builderOptions) *imageSources pullOption = backend.PullOptionPreferLocal } } - optionsPlatform := system.ParsePlatform(options.Options.Platform) return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ PullOption: pullOption, AuthConfig: options.Options.AuthConfigs, Output: options.ProgressWriter.Output, - OS: optionsPlatform.OS, + OS: osForPull, }) } @@ -47,12 +45,12 @@ func newImageSources(ctx context.Context, options builderOptions) *imageSources } } -func (m *imageSources) Get(idOrRef string, localOnly bool) (*imageMount, error) { +func (m *imageSources) Get(idOrRef string, localOnly bool, osForPull string) (*imageMount, error) { if im, ok := m.byImageID[idOrRef]; ok { return im, nil } - image, layer, err := m.getImage(idOrRef, localOnly) + image, layer, err := m.getImage(idOrRef, localOnly, osForPull) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go b/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go index d4f55ceb4..a10140cf0 100644 --- 
a/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go @@ -7,6 +7,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" + specs "github.com/opencontainers/image-spec/specs-go/v1" ) // KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) @@ -361,6 +362,7 @@ type Stage struct { Commands []Command BaseName string SourceCode string + Platform specs.Platform } // AddCommand to the stage diff --git a/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go b/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go index 9226f4d46..e2d69a488 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" "github.com/pkg/errors" ) @@ -271,16 +272,17 @@ func parseFrom(req parseRequest) (*Stage, error) { return nil, err } + flPlatform := req.flags.AddString("platform", "") if err := req.flags.Parse(); err != nil { return nil, err } code := strings.TrimSpace(req.original) - return &Stage{ BaseName: req.args[0], Name: stageName, SourceCode: code, Commands: []Command{}, + Platform: *system.ParsePlatform(flPlatform.Value), }, nil } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse_test.go b/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse_test.go index ffd6d4f45..242630f72 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse_test.go @@ -7,8 +7,8 @@ 
import ( "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestCommandsExactlyOneArgument(t *testing.T) { @@ -21,9 +21,9 @@ func TestCommandsExactlyOneArgument(t *testing.T) { for _, command := range commands { ast, err := parser.Parse(strings.NewReader(command)) - require.NoError(t, err) + assert.NilError(t, err) _, err = ParseInstruction(ast.AST.Children[0]) - assert.EqualError(t, err, errExactlyOneArgument(command).Error()) + assert.Check(t, is.Error(err, errExactlyOneArgument(command).Error())) } } @@ -39,9 +39,9 @@ func TestCommandsAtLeastOneArgument(t *testing.T) { for _, command := range commands { ast, err := parser.Parse(strings.NewReader(command)) - require.NoError(t, err) + assert.NilError(t, err) _, err = ParseInstruction(ast.AST.Children[0]) - assert.EqualError(t, err, errAtLeastOneArgument(command).Error()) + assert.Check(t, is.Error(err, errAtLeastOneArgument(command).Error())) } } @@ -53,9 +53,9 @@ func TestCommandsNoDestinationArgument(t *testing.T) { for _, command := range commands { ast, err := parser.Parse(strings.NewReader(command + " arg1")) - require.NoError(t, err) + assert.NilError(t, err) _, err = ParseInstruction(ast.AST.Children[0]) - assert.EqualError(t, err, errNoDestinationArgument(command).Error()) + assert.Check(t, is.Error(err, errNoDestinationArgument(command).Error())) } } @@ -80,7 +80,7 @@ func TestCommandsTooManyArguments(t *testing.T) { }, } _, err := ParseInstruction(node) - assert.EqualError(t, err, errTooManyArguments(command).Error()) + assert.Check(t, is.Error(err, errTooManyArguments(command).Error())) } } @@ -102,7 +102,7 @@ func TestCommandsBlankNames(t *testing.T) { }, } _, err := ParseInstruction(node) - 
assert.EqualError(t, err, errBlankCommandNames(command).Error()) + assert.Check(t, is.Error(err, errBlankCommandNames(command).Error())) } } @@ -120,11 +120,11 @@ func TestHealthCheckCmd(t *testing.T) { }, } cmd, err := ParseInstruction(node) - assert.NoError(t, err) + assert.Check(t, err) hc, ok := cmd.(*HealthCheckCommand) - assert.True(t, ok) + assert.Check(t, ok) expected := []string{"CMD-SHELL", "hello world"} - assert.Equal(t, expected, hc.Health.Test) + assert.Check(t, is.DeepEqual(expected, hc.Health.Test)) } func TestParseOptInterval(t *testing.T) { @@ -138,7 +138,7 @@ func TestParseOptInterval(t *testing.T) { flInterval.Value = "1ms" _, err = parseOptInterval(flInterval) - require.NoError(t, err) + assert.NilError(t, err) } func TestErrorCases(t *testing.T) { @@ -196,5 +196,4 @@ func TestErrorCases(t *testing.T) { _, err = ParseInstruction(n) testutil.ErrorContains(t, err, c.expectedError) } - } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go index c8b34f8f6..53748f061 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -83,8 +83,7 @@ func (b *Builder) commit(dispatchState *dispatchState, comment string) error { return errors.New("Please provide a source image with `from` prior to commit") } - optionsPlatform := system.ParsePlatform(b.options.Platform) - runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, optionsPlatform.OS)) + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, dispatchState.operatingSystem)) hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd) if err != nil || hit { return err @@ -164,16 +163,15 @@ func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, 
inst.dest) // TODO: should this have been using origPaths instead of srcHash in the comment? - optionsPlatform := system.ParsePlatform(b.options.Platform) runConfigWithCommentCmd := copyRunConfig( state.runConfig, - withCmdCommentString(commentStr, optionsPlatform.OS)) + withCmdCommentString(commentStr, state.operatingSystem)) hit, err := b.probeCache(state, runConfigWithCommentCmd) if err != nil || hit { return err } - imageMount, err := b.imageSources.Get(state.imageID, true) + imageMount, err := b.imageSources.Get(state.imageID, true, state.operatingSystem) if err != nil { return errors.Wrapf(err, "failed to get destination image %q", state.imageID) } @@ -184,7 +182,7 @@ func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error } defer rwLayer.Release() - destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, b.options.Platform) + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem) if err != nil { return err } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go index 08067f857..c244ddfe3 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux_test.go @@ -6,8 +6,8 @@ import ( "testing" "github.com/docker/docker/pkg/idtools" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestChownFlagParsing(t *testing.T) { @@ -99,8 +99,8 @@ othergrp:x:6666: } { t.Run(testcase.name, func(t *testing.T) { idPair, err := parseChownFlag(testcase.chownStr, contextDir, testcase.idMapping) - require.NoError(t, err, "Failed to parse chown flag: %q", testcase.chownStr) - assert.Equal(t, testcase.expected, idPair, "chown flag mapping failure") 
+ assert.NilError(t, err, "Failed to parse chown flag: %q", testcase.chownStr) + assert.Check(t, is.DeepEqual(testcase.expected, idPair), "chown flag mapping failure") }) } @@ -132,7 +132,7 @@ othergrp:x:6666: } { t.Run(testcase.name, func(t *testing.T) { _, err := parseChownFlag(testcase.chownStr, contextDir, testcase.idMapping) - assert.EqualError(t, err, testcase.descr, "Expected error string doesn't match") + assert.Check(t, is.Error(err, testcase.descr), "Expected error string doesn't match") }) } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go index 24103ecd8..ae2002683 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_test.go @@ -12,8 +12,8 @@ import ( "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/pkg/archive" "github.com/docker/go-connections/nat" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestEmptyDockerfile(t *testing.T) { @@ -60,7 +60,7 @@ func TestNonExistingDockerfile(t *testing.T) { func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, expectedError string) { tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - require.NoError(t, err) + assert.NilError(t, err) defer func() { if err = tarStream.Close(); err != nil { @@ -77,7 +77,7 @@ func readAndCheckDockerfile(t *testing.T, testName, contextDir, dockerfilePath, Source: tarStream, } _, _, err = remotecontext.Detect(config) - assert.EqualError(t, err, expectedError) + assert.Check(t, is.Error(err, expectedError)) } func TestCopyRunConfig(t *testing.T) { @@ -124,9 +124,9 @@ func TestCopyRunConfig(t *testing.T) { Env: defaultEnv, } runConfigCopy := copyRunConfig(runConfig, 
testcase.modifiers...) - assert.Equal(t, testcase.expected, runConfigCopy, testcase.doc) + assert.Check(t, is.DeepEqual(testcase.expected, runConfigCopy), testcase.doc) // Assert the original was not modified - assert.NotEqual(t, runConfig, runConfigCopy, testcase.doc) + assert.Check(t, runConfig != runConfigCopy, testcase.doc) } } @@ -156,7 +156,7 @@ func fullMutableRunConfig() *container.Config { func TestDeepCopyRunConfig(t *testing.T) { runConfig := fullMutableRunConfig() copy := copyRunConfig(runConfig) - assert.Equal(t, fullMutableRunConfig(), copy) + assert.Check(t, is.DeepEqual(fullMutableRunConfig(), copy)) copy.Cmd[1] = "arg2" copy.Env[1] = "env2=new" @@ -166,5 +166,5 @@ func TestDeepCopyRunConfig(t *testing.T) { copy.OnBuild[0] = "start" copy.Labels["label3"] = "value3" copy.Shell[0] = "sh" - assert.Equal(t, fullMutableRunConfig(), runConfig) + assert.Check(t, is.DeepEqual(fullMutableRunConfig(), runConfig)) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go index 08f394ac6..1fc55c075 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows_test.go @@ -7,7 +7,8 @@ import ( "testing" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestNormalizeDest(t *testing.T) { @@ -42,10 +43,10 @@ func TestNormalizeDest(t *testing.T) { msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested) actual, err := normalizeDest(testcase.current, testcase.requested, "windows") if testcase.etext == "" { - if !assert.NoError(t, err, msg) { + if !assert.Check(t, err, msg) { continue } - assert.Equal(t, testcase.expected, actual, msg) + assert.Check(t, is.Equal(testcase.expected, actual), 
msg) } else { testutil.ErrorContains(t, err, testcase.etext) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go index 8ce6a7ad6..50b8d03c2 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/line_parsers_test.go @@ -3,25 +3,29 @@ package parser // import "github.com/docker/docker/builder/dockerfile/parser" import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/google/go-cmp/cmp" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestParseNameValOldFormat(t *testing.T) { directive := Directive{} node, err := parseNameVal("foo bar", "LABEL", &directive) - assert.NoError(t, err) + assert.Check(t, err) expected := &Node{ Value: "foo", Next: &Node{Value: "bar"}, } - assert.Equal(t, expected, node) + assert.DeepEqual(t, expected, node, cmpNodeOpt) } +var cmpNodeOpt = cmp.AllowUnexported(Node{}) + func TestParseNameValNewFormat(t *testing.T) { directive := Directive{} node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive) - assert.NoError(t, err) + assert.Check(t, err) expected := &Node{ Value: "foo", @@ -35,7 +39,7 @@ func TestParseNameValNewFormat(t *testing.T) { }, }, } - assert.Equal(t, expected, node) + assert.DeepEqual(t, expected, node, cmpNodeOpt) } func TestNodeFromLabels(t *testing.T) { @@ -61,8 +65,7 @@ func TestNodeFromLabels(t *testing.T) { } node := NodeFromLabels(labels) - assert.Equal(t, expected, node) - + assert.DeepEqual(t, expected, node, cmpNodeOpt) } func TestParseNameValWithoutVal(t *testing.T) { @@ -70,5 +73,5 @@ func TestParseNameValWithoutVal(t *testing.T) { // In Config.Env, a variable without `=` is removed from the environment. (#31634) // However, in Dockerfile, we don't allow "unsetting" an environment variable. 
(#11922) _, err := parseNameVal("foo", "ENV", &directive) - assert.Error(t, err, "ENV must have two arguments") + assert.Check(t, is.ErrorContains(err, ""), "ENV must have two arguments") } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go index 277176ee1..b065b8a4e 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go @@ -7,13 +7,11 @@ import ( "fmt" "io" "regexp" - "runtime" "strconv" "strings" "unicode" "github.com/docker/docker/builder/dockerfile/command" - "github.com/docker/docker/pkg/system" "github.com/pkg/errors" ) @@ -81,11 +79,10 @@ func (node *Node) AddChild(child *Node, startLine, endLine int) { } var ( - dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) - tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P.*)$`) - tokenComment = regexp.MustCompile(`^#.*$`) + dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) + tokenComment = regexp.MustCompile(`^#.*$`) ) // DefaultEscapeToken is the default escape token @@ -95,11 +92,9 @@ const DefaultEscapeToken = '\\' // parsing directives. 
type Directive struct { escapeToken rune // Current escape token - platformToken string // Current platform token lineContinuationRegex *regexp.Regexp // Current line continuation regex processingComplete bool // Whether we are done looking for directives escapeSeen bool // Whether the escape directive has been seen - platformSeen bool // Whether the platform directive has been seen } // setEscapeToken sets the default token for escaping characters in a Dockerfile. @@ -112,25 +107,9 @@ func (d *Directive) setEscapeToken(s string) error { return nil } -// setPlatformToken sets the default platform for pulling images in a Dockerfile. -func (d *Directive) setPlatformToken(s string) error { - s = strings.ToLower(s) - valid := []string{runtime.GOOS} - if system.LCOWSupported() { - valid = append(valid, "linux") - } - for _, item := range valid { - if s == item { - d.platformToken = s - return nil - } - } - return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid) -} - -// possibleParserDirective looks for one or more parser directives '# escapeToken=' and -// '# platform='. Parser directives must precede any builder instruction -// or other comments, and cannot be repeated. +// possibleParserDirective looks for parser directives, eg '# escapeToken='. +// Parser directives must precede any builder instruction or other comments, +// and cannot be repeated. 
func (d *Directive) possibleParserDirective(line string) error { if d.processingComplete { return nil @@ -149,22 +128,6 @@ func (d *Directive) possibleParserDirective(line string) error { } } - // Only recognise a platform token if LCOW is supported - if system.LCOWSupported() { - tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tpcMatch) != 0 { - for i, n := range tokenPlatformCommand.SubexpNames() { - if n == "platform" { - if d.platformSeen { - return errors.New("only one platform parser directive can be used") - } - d.platformSeen = true - return d.setPlatformToken(tpcMatch[i]) - } - } - } - } - d.processingComplete = true return nil } @@ -237,10 +200,7 @@ func newNodeFromLine(line string, directive *Directive) (*Node, error) { type Result struct { AST *Node EscapeToken rune - // TODO @jhowardmsft - see https://github.com/moby/moby/issues/34617 - // This next field will be removed in a future update for LCOW support. - OS string - Warnings []string + Warnings []string } // PrintWarnings to the writer @@ -320,7 +280,6 @@ func Parse(rwc io.Reader) (*Result, error) { AST: root, Warnings: warnings, EscapeToken: d.escapeToken, - OS: d.platformToken, }, handleScannerError(scanner.Err()) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go index 807ac1b09..10bed1f75 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser_test.go @@ -11,8 +11,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) const testDir = "testfiles" @@ -21,11 +21,11 @@ const testFileLineInfo = "testfile-line/Dockerfile" func getDirs(t *testing.T, dir string) []string { f, err := 
os.Open(dir) - require.NoError(t, err) + assert.NilError(t, err) defer f.Close() dirs, err := f.Readdirnames(0) - require.NoError(t, err) + assert.NilError(t, err) return dirs } @@ -34,11 +34,11 @@ func TestParseErrorCases(t *testing.T) { dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") df, err := os.Open(dockerfile) - require.NoError(t, err, dockerfile) + assert.NilError(t, err, dockerfile) defer df.Close() _, err = Parse(df) - assert.Error(t, err, dockerfile) + assert.Check(t, is.ErrorContains(err, ""), dockerfile) } } @@ -48,20 +48,20 @@ func TestParseCases(t *testing.T) { resultfile := filepath.Join(testDir, dir, "result") df, err := os.Open(dockerfile) - require.NoError(t, err, dockerfile) + assert.NilError(t, err, dockerfile) defer df.Close() result, err := Parse(df) - require.NoError(t, err, dockerfile) + assert.NilError(t, err, dockerfile) content, err := ioutil.ReadFile(resultfile) - require.NoError(t, err, resultfile) + assert.NilError(t, err, resultfile) if runtime.GOOS == "windows" { // CRLF --> CR to match Unix behavior content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) } - assert.Equal(t, result.AST.Dump()+"\n", string(content), "In "+dockerfile) + assert.Check(t, is.Equal(result.AST.Dump()+"\n", string(content)), "In "+dockerfile) } } @@ -103,22 +103,22 @@ func TestParseWords(t *testing.T) { for _, test := range tests { words := parseWords(test["input"][0], NewDefaultDirective()) - assert.Equal(t, test["expect"], words) + assert.Check(t, is.DeepEqual(test["expect"], words)) } } func TestParseIncludesLineNumbers(t *testing.T) { df, err := os.Open(testFileLineInfo) - require.NoError(t, err) + assert.NilError(t, err) defer df.Close() result, err := Parse(df) - require.NoError(t, err) + assert.NilError(t, err) ast := result.AST - assert.Equal(t, 5, ast.StartLine) - assert.Equal(t, 31, ast.endLine) - assert.Len(t, ast.Children, 3) + assert.Check(t, is.Equal(5, ast.StartLine)) + assert.Check(t, is.Equal(31, 
ast.endLine)) + assert.Check(t, is.Len(ast.Children, 3)) expected := [][]int{ {5, 5}, {11, 12}, @@ -126,7 +126,7 @@ func TestParseIncludesLineNumbers(t *testing.T) { } for i, child := range ast.Children { msg := fmt.Sprintf("Child %d", i) - assert.Equal(t, expected[i], []int{child.StartLine, child.endLine}, msg) + assert.Check(t, is.DeepEqual(expected[i], []int{child.StartLine, child.endLine}), msg) } } @@ -153,13 +153,13 @@ RUN indented \ `) result, err := Parse(dockerfile) - require.NoError(t, err) + assert.NilError(t, err) warnings := result.Warnings - assert.Len(t, warnings, 3) - assert.Contains(t, warnings[0], "Empty continuation line found in") - assert.Contains(t, warnings[0], "RUN something following more") - assert.Contains(t, warnings[1], "RUN another thing") - assert.Contains(t, warnings[2], "will become errors in a future release") + assert.Check(t, is.Len(warnings, 3)) + assert.Check(t, is.Contains(warnings[0], "Empty continuation line found in")) + assert.Check(t, is.Contains(warnings[0], "RUN something following more")) + assert.Check(t, is.Contains(warnings[1], "RUN another thing")) + assert.Check(t, is.Contains(warnings[2], "will become errors in a future release")) } func TestParseReturnsScannerErrors(t *testing.T) { @@ -170,5 +170,5 @@ func TestParseReturnsScannerErrors(t *testing.T) { LABEL test=%s `, label)) _, err := Parse(dockerfile) - assert.EqualError(t, err, "dockerfile line greater than max allowed size of 65535") + assert.Check(t, is.Error(err, "dockerfile line greater than max allowed size of 65535")) } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/shell/lex_test.go b/vendor/github.com/docker/docker/builder/dockerfile/shell/lex_test.go index 6932a44e3..7a726ad79 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/shell/lex_test.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/shell/lex_test.go @@ -7,7 +7,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + 
"github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestShellParser4EnvVars(t *testing.T) { @@ -15,7 +16,7 @@ func TestShellParser4EnvVars(t *testing.T) { lineCount := 0 file, err := os.Open(fn) - assert.NoError(t, err) + assert.Check(t, err) defer file.Close() shlex := NewLex('\\') @@ -37,7 +38,7 @@ func TestShellParser4EnvVars(t *testing.T) { } words := strings.Split(line, "|") - assert.Len(t, words, 3) + assert.Check(t, is.Len(words, 3)) platform := strings.TrimSpace(words[0]) source := strings.TrimSpace(words[1]) @@ -52,10 +53,10 @@ func TestShellParser4EnvVars(t *testing.T) { ((platform == "U" || platform == "A") && runtime.GOOS != "windows") { newWord, err := shlex.ProcessWord(source, envs) if expected == "error" { - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) } else { - assert.NoError(t, err) - assert.Equal(t, newWord, expected) + assert.Check(t, err) + assert.Check(t, is.Equal(newWord, expected)) } } } diff --git a/vendor/github.com/docker/docker/builder/fscache/fscache_test.go b/vendor/github.com/docker/docker/builder/fscache/fscache_test.go index 7afee49ed..613070f7b 100644 --- a/vendor/github.com/docker/docker/builder/fscache/fscache_test.go +++ b/vendor/github.com/docker/docker/builder/fscache/fscache_test.go @@ -7,14 +7,15 @@ import ( "testing" "time" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/moby/buildkit/session/filesync" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) func TestFSCache(t *testing.T) { tmpDir, err := ioutil.TempDir("", "fscache") - assert.Nil(t, err) + assert.Check(t, err) defer os.RemoveAll(tmpDir) backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend")) @@ -26,84 +27,84 @@ func TestFSCache(t *testing.T) { } fscache, err := NewFSCache(opt) - assert.Nil(t, err) + assert.Check(t, err) defer fscache.Close() err = 
fscache.RegisterTransport("test", &testTransport{}) - assert.Nil(t, err) + assert.Check(t, err) src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"}) - assert.Nil(t, err) + assert.Check(t, err) dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo")) - assert.Nil(t, err) - assert.Equal(t, string(dt), "data") + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data")) // same id doesn't recalculate anything src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"}) - assert.Nil(t, err) - assert.Equal(t, src1.Root().Path(), src2.Root().Path()) + assert.Check(t, err) + assert.Check(t, is.Equal(src1.Root().Path(), src2.Root().Path())) dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo")) - assert.Nil(t, err) - assert.Equal(t, string(dt), "data") - assert.Nil(t, src2.Close()) + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data")) + assert.Check(t, src2.Close()) src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"}) - assert.Nil(t, err) - assert.NotEqual(t, src1.Root().Path(), src3.Root().Path()) + assert.Check(t, err) + assert.Check(t, src1.Root().Path() != src3.Root().Path()) dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2")) - assert.Nil(t, err) - assert.Equal(t, string(dt), "data2") + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data2")) s, err := fscache.DiskUsage() - assert.Nil(t, err) - assert.Equal(t, s, int64(0)) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(0))) - assert.Nil(t, src3.Close()) + assert.Check(t, src3.Close()) s, err = fscache.DiskUsage() - assert.Nil(t, err) - assert.Equal(t, s, int64(5)) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(5))) // new upload with the same shared key shoutl overwrite src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"}) - assert.Nil(t, err) - assert.NotEqual(t, src1.Root().Path(), 
src3.Root().Path()) + assert.Check(t, err) + assert.Check(t, src1.Root().Path() != src3.Root().Path()) dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3")) - assert.Nil(t, err) - assert.Equal(t, string(dt), "data3") - assert.Equal(t, src4.Root().Path(), src3.Root().Path()) - assert.Nil(t, src4.Close()) + assert.Check(t, err) + assert.Check(t, is.Equal(string(dt), "data3")) + assert.Check(t, is.Equal(src4.Root().Path(), src3.Root().Path())) + assert.Check(t, src4.Close()) s, err = fscache.DiskUsage() - assert.Nil(t, err) - assert.Equal(t, s, int64(10)) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(10))) // this one goes over the GC limit src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"}) - assert.Nil(t, err) - assert.Nil(t, src5.Close()) + assert.Check(t, err) + assert.Check(t, src5.Close()) // GC happens async time.Sleep(100 * time.Millisecond) // only last insertion after GC s, err = fscache.DiskUsage() - assert.Nil(t, err) - assert.Equal(t, s, int64(8)) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(8))) // prune deletes everything released, err := fscache.Prune(context.TODO()) - assert.Nil(t, err) - assert.Equal(t, released, uint64(8)) + assert.Check(t, err) + assert.Check(t, is.Equal(released, uint64(8))) s, err = fscache.DiskUsage() - assert.Nil(t, err) - assert.Equal(t, s, int64(0)) + assert.Check(t, err) + assert.Check(t, is.Equal(s, int64(0))) } type testTransport struct { diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go index 4f4d83350..a46675b22 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils_test.go @@ -13,42 +13,39 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + 
"github.com/google/go-cmp/cmp" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestParseRemoteURL(t *testing.T) { dir, err := parseRemoteURL("git://github.com/user/repo.git") - require.NoError(t, err) - assert.NotEmpty(t, dir) - assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "master", ""}, dir) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git://github.com/user/repo.git", "master", ""}, dir, cmpGitRepoOpt)) dir, err = parseRemoteURL("git://github.com/user/repo.git#mybranch:mydir/mysubdir/") - require.NoError(t, err) - assert.NotEmpty(t, dir) - assert.Equal(t, gitRepo{"git://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir, cmpGitRepoOpt)) dir, err = parseRemoteURL("https://github.com/user/repo.git") - require.NoError(t, err) - assert.NotEmpty(t, dir) - assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "master", ""}, dir) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"https://github.com/user/repo.git", "master", ""}, dir, cmpGitRepoOpt)) dir, err = parseRemoteURL("https://github.com/user/repo.git#mybranch:mydir/mysubdir/") - require.NoError(t, err) - assert.NotEmpty(t, dir) - assert.Equal(t, gitRepo{"https://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"https://github.com/user/repo.git", "mybranch", "mydir/mysubdir/"}, dir, cmpGitRepoOpt)) dir, err = parseRemoteURL("git@github.com:user/repo.git") - require.NoError(t, err) - assert.NotEmpty(t, dir) - assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "master", ""}, dir) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git@github.com:user/repo.git", "master", ""}, dir, cmpGitRepoOpt)) dir, err = 
parseRemoteURL("git@github.com:user/repo.git#mybranch:mydir/mysubdir/") - require.NoError(t, err) - assert.NotEmpty(t, dir) - assert.Equal(t, gitRepo{"git@github.com:user/repo.git", "mybranch", "mydir/mysubdir/"}, dir) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(gitRepo{"git@github.com:user/repo.git", "mybranch", "mydir/mysubdir/"}, dir, cmpGitRepoOpt)) } +var cmpGitRepoOpt = cmp.AllowUnexported(gitRepo{}) + func TestCloneArgsSmartHttp(t *testing.T) { mux := http.NewServeMux() server := httptest.NewServer(mux) @@ -63,7 +60,7 @@ func TestCloneArgsSmartHttp(t *testing.T) { args := fetchArgs(serverURL.String(), "master") exp := []string{"fetch", "--depth", "1", "origin", "master"} - assert.Equal(t, exp, args) + assert.Check(t, is.DeepEqual(exp, args)) } func TestCloneArgsDumbHttp(t *testing.T) { @@ -79,13 +76,13 @@ func TestCloneArgsDumbHttp(t *testing.T) { args := fetchArgs(serverURL.String(), "master") exp := []string{"fetch", "origin", "master"} - assert.Equal(t, exp, args) + assert.Check(t, is.DeepEqual(exp, args)) } func TestCloneArgsGit(t *testing.T) { args := fetchArgs("git://github.com/docker/docker", "master") exp := []string{"fetch", "--depth", "1", "origin", "master"} - assert.Equal(t, exp, args) + assert.Check(t, is.DeepEqual(exp, args)) } func gitGetConfig(name string) string { @@ -100,7 +97,7 @@ func gitGetConfig(name string) string { func TestCheckoutGit(t *testing.T) { root, err := ioutil.TempDir("", "docker-build-git-checkout") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(root) autocrlf := gitGetConfig("core.autocrlf") @@ -115,22 +112,22 @@ func TestCheckoutGit(t *testing.T) { gitDir := filepath.Join(root, "repo") _, err = git("init", gitDir) - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test") - require.NoError(t, err) + 
assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644) - require.NoError(t, err) + assert.NilError(t, err) subDir := filepath.Join(gitDir, "subdir") - require.NoError(t, os.Mkdir(subDir, 0755)) + assert.NilError(t, os.Mkdir(subDir, 0755)) err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644) - require.NoError(t, err) + assert.NilError(t, err) if runtime.GOOS != "windows" { if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { @@ -143,58 +140,58 @@ func TestCheckoutGit(t *testing.T) { } _, err = gitWithinDir(gitDir, "add", "-A") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "commit", "-am", "First commit") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "checkout", "-b", "test") - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644) - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "add", "-A") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "checkout", "master") - require.NoError(t, err) + assert.NilError(t, err) // set up submodule subrepoDir := filepath.Join(root, "subrepo") _, err = git("init", subrepoDir) - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(subrepoDir, "config", "user.email", "test@docker.com") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(subrepoDir, "config", "user.name", "Docker test") - require.NoError(t, err) + assert.NilError(t, err) err = 
ioutil.WriteFile(filepath.Join(subrepoDir, "subfile"), []byte("subcontents"), 0644) - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(subrepoDir, "add", "-A") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(subrepoDir, "commit", "-am", "Subrepo initial") - require.NoError(t, err) + assert.NilError(t, err) cmd := exec.Command("git", "submodule", "add", subrepoDir, "sub") // this command doesn't work with --work-tree cmd.Dir = gitDir - require.NoError(t, cmd.Run()) + assert.NilError(t, cmd.Run()) _, err = gitWithinDir(gitDir, "add", "-A") - require.NoError(t, err) + assert.NilError(t, err) _, err = gitWithinDir(gitDir, "commit", "-am", "With submodule") - require.NoError(t, err) + assert.NilError(t, err) type singleCase struct { frag string @@ -232,24 +229,24 @@ func TestCheckoutGit(t *testing.T) { r, err := cloneGitRepo(gitRepo{remote: gitDir, ref: ref, subdir: subdir}) if c.fail { - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) continue } - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(r) if c.submodule { b, err := ioutil.ReadFile(filepath.Join(r, "sub/subfile")) - require.NoError(t, err) - assert.Equal(t, "subcontents", string(b)) + assert.NilError(t, err) + assert.Check(t, is.Equal("subcontents", string(b))) } else { _, err := os.Stat(filepath.Join(r, "sub/subfile")) - require.Error(t, err) - require.True(t, os.IsNotExist(err)) + assert.Assert(t, is.ErrorContains(err, "")) + assert.Assert(t, os.IsNotExist(err)) } b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) - require.NoError(t, err) - assert.Equal(t, c.exp, string(b)) + assert.NilError(t, err) + assert.Check(t, is.Equal(c.exp, string(b))) } } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go index ff097c2e7..b13429cfa 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go +++ 
b/vendor/github.com/docker/docker/builder/remotecontext/mimetype_test.go @@ -3,14 +3,14 @@ package remotecontext // import "github.com/docker/docker/builder/remotecontext" import ( "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestDetectContentType(t *testing.T) { input := []byte("That is just a plain text") contentType, _, err := detectContentType(input) - require.NoError(t, err) - assert.Equal(t, "text/plain", contentType) + assert.NilError(t, err) + assert.Check(t, is.Equal("text/plain", contentType)) } diff --git a/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go b/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go index 3983bd1b6..5267d2396 100644 --- a/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go +++ b/vendor/github.com/docker/docker/builder/remotecontext/remote_test.go @@ -11,9 +11,9 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic @@ -189,12 +189,12 @@ func TestDownloadRemote(t *testing.T) { mux.Handle("/", http.FileServer(http.Dir(contextDir.Path()))) contentType, content, err := downloadRemote(remoteURL) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, mimeTypes.TextPlain, contentType) + assert.Check(t, is.Equal(mimeTypes.TextPlain, contentType)) raw, err := ioutil.ReadAll(content) - require.NoError(t, err) - assert.Equal(t, dockerfileContents, string(raw)) + assert.NilError(t, err) + assert.Check(t, is.Equal(dockerfileContents, string(raw))) } func 
TestGetWithStatusError(t *testing.T) { @@ -226,11 +226,11 @@ func TestGetWithStatusError(t *testing.T) { response, err := GetWithStatusError(ts.URL) if testcase.expectedErr == "" { - require.NoError(t, err) + assert.NilError(t, err) body, err := readBody(response.Body) - require.NoError(t, err) - assert.Contains(t, string(body), testcase.expectedBody) + assert.NilError(t, err) + assert.Check(t, is.Contains(string(body), testcase.expectedBody)) } else { testutil.ErrorContains(t, err, testcase.expectedErr) } diff --git a/vendor/github.com/docker/docker/client/client_test.go b/vendor/github.com/docker/docker/client/client_test.go index d6b8f6735..7cca04ac7 100644 --- a/vendor/github.com/docker/docker/client/client_test.go +++ b/vendor/github.com/docker/docker/client/client_test.go @@ -11,10 +11,10 @@ import ( "github.com/docker/docker/api" "github.com/docker/docker/api/types" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/env" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestNewEnvClient(t *testing.T) { @@ -89,19 +89,18 @@ func TestNewEnvClient(t *testing.T) { env.PatchAll(t, c.envs) apiclient, err := NewEnvClient() if c.expectedError != "" { - assert.Error(t, err, c.doc) - assert.Equal(t, c.expectedError, err.Error(), c.doc) + assert.Check(t, is.Error(err, c.expectedError), c.doc) } else { - assert.NoError(t, err, c.doc) + assert.Check(t, err, c.doc) version := apiclient.ClientVersion() - assert.Equal(t, c.expectedVersion, version, c.doc) + assert.Check(t, is.Equal(c.expectedVersion, version), c.doc) } if c.envs["DOCKER_TLS_VERIFY"] != "" { // pedantic checking that this is handled correctly tr := apiclient.client.Transport.(*http.Transport) - assert.NotNil(t, tr.TLSClientConfig, c.doc) - assert.Equal(t, 
tr.TLSClientConfig.InsecureSkipVerify, false, c.doc) + assert.Assert(t, tr.TLSClientConfig != nil, c.doc) + assert.Check(t, is.Equal(tr.TLSClientConfig.InsecureSkipVerify, false), c.doc) } } } @@ -128,7 +127,7 @@ func TestGetAPIPath(t *testing.T) { for _, testcase := range testcases { c := Client{version: testcase.version, basePath: "/"} actual := c.getAPIPath(testcase.path, testcase.query) - assert.Equal(t, actual, testcase.expected) + assert.Check(t, is.Equal(actual, testcase.expected)) } } @@ -165,7 +164,7 @@ func TestParseHostURL(t *testing.T) { if testcase.expectedErr != "" { testutil.ErrorContains(t, err, testcase.expectedErr) } - assert.Equal(t, testcase.expected, actual) + assert.Check(t, is.DeepEqual(testcase.expected, actual)) } } @@ -181,7 +180,7 @@ func TestNewEnvClientSetsDefaultVersion(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, client.version, api.DefaultVersion) + assert.Check(t, is.Equal(client.version, api.DefaultVersion)) expected := "1.22" os.Setenv("DOCKER_API_VERSION", expected) @@ -189,7 +188,7 @@ func TestNewEnvClientSetsDefaultVersion(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, expected, client.version) + assert.Check(t, is.Equal(expected, client.version)) } // TestNegotiateAPIVersionEmpty asserts that client.Client can @@ -198,7 +197,7 @@ func TestNegotiateAPIVersionEmpty(t *testing.T) { defer env.PatchAll(t, map[string]string{"DOCKER_API_VERSION": ""}) client, err := NewEnvClient() - require.NoError(t, err) + assert.NilError(t, err) ping := types.Ping{ APIVersion: "", @@ -215,14 +214,14 @@ func TestNegotiateAPIVersionEmpty(t *testing.T) { // test downgrade client.NegotiateAPIVersionPing(ping) - assert.Equal(t, expected, client.version) + assert.Check(t, is.Equal(expected, client.version)) } // TestNegotiateAPIVersion asserts that client.Client can // negotiate a compatible APIVersion with the server func TestNegotiateAPIVersion(t *testing.T) { client, err := NewEnvClient() - require.NoError(t, 
err) + assert.NilError(t, err) expected := "1.21" ping := types.Ping{ @@ -236,14 +235,14 @@ func TestNegotiateAPIVersion(t *testing.T) { // test downgrade client.NegotiateAPIVersionPing(ping) - assert.Equal(t, expected, client.version) + assert.Check(t, is.Equal(expected, client.version)) // set the client version to something older, and verify that we keep the // original setting. expected = "1.20" client.version = expected client.NegotiateAPIVersionPing(ping) - assert.Equal(t, expected, client.version) + assert.Check(t, is.Equal(expected, client.version)) } @@ -254,7 +253,7 @@ func TestNegotiateAPVersionOverride(t *testing.T) { defer env.PatchAll(t, map[string]string{"DOCKER_API_VERSION": expected})() client, err := NewEnvClient() - require.NoError(t, err) + assert.NilError(t, err) ping := types.Ping{ APIVersion: "1.24", @@ -264,7 +263,7 @@ func TestNegotiateAPVersionOverride(t *testing.T) { // test that we honored the env var client.NegotiateAPIVersionPing(ping) - assert.Equal(t, expected, client.version) + assert.Check(t, is.Equal(expected, client.version)) } type roundTripFunc func(*http.Request) (*http.Response, error) @@ -298,7 +297,7 @@ func TestClientRedirect(t *testing.T) { cases := []struct { httpMethod string - expectedErr error + expectedErr *url.Error statusCode int }{ {http.MethodGet, nil, 301}, @@ -309,9 +308,15 @@ func TestClientRedirect(t *testing.T) { for _, tc := range cases { req, err := http.NewRequest(tc.httpMethod, "/redirectme", nil) - assert.NoError(t, err) + assert.Check(t, err) resp, err := client.Do(req) - assert.Equal(t, tc.expectedErr, err) - assert.Equal(t, tc.statusCode, resp.StatusCode) + assert.Check(t, is.Equal(tc.statusCode, resp.StatusCode)) + if tc.expectedErr == nil { + assert.Check(t, is.Nil(err)) + } else { + urlError, ok := err.(*url.Error) + assert.Assert(t, ok, "%T is not *url.Error", err) + assert.Check(t, is.Equal(*tc.expectedErr, *urlError)) + } } } diff --git 
a/vendor/github.com/docker/docker/client/config_create_test.go b/vendor/github.com/docker/docker/client/config_create_test.go index 3f3cb3fc5..2ee8f1fd4 100644 --- a/vendor/github.com/docker/docker/client/config_create_test.go +++ b/vendor/github.com/docker/docker/client/config_create_test.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -21,7 +22,7 @@ func TestConfigCreateUnsupported(t *testing.T) { client: &http.Client{}, } _, err := client.ConfigCreate(context.Background(), swarm.ConfigSpec{}) - assert.EqualError(t, err, `"config create" requires API version 1.30, but the Docker daemon API version is 1.29`) + assert.Check(t, is.Error(err, `"config create" requires API version 1.30, but the Docker daemon API version is 1.29`)) } func TestConfigCreateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/config_inspect_test.go b/vendor/github.com/docker/docker/client/config_inspect_test.go index c6d73e5c0..9d5af0bf8 100644 --- a/vendor/github.com/docker/docker/client/config_inspect_test.go +++ b/vendor/github.com/docker/docker/client/config_inspect_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -44,7 +45,7 @@ func TestConfigInspectUnsupported(t *testing.T) { client: &http.Client{}, } _, _, err := client.ConfigInspectWithRaw(context.Background(), "nothing") - assert.EqualError(t, err, `"config inspect" requires API version 1.30, but the Docker daemon API version is 1.29`) + assert.Check(t, is.Error(err, `"config inspect" requires API version 1.30, but the Docker 
daemon API version is 1.29`)) } func TestConfigInspectError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/config_list_test.go b/vendor/github.com/docker/docker/client/config_list_test.go index 4b4a5e84c..0cd99c50d 100644 --- a/vendor/github.com/docker/docker/client/config_list_test.go +++ b/vendor/github.com/docker/docker/client/config_list_test.go @@ -12,7 +12,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -22,7 +23,7 @@ func TestConfigListUnsupported(t *testing.T) { client: &http.Client{}, } _, err := client.ConfigList(context.Background(), types.ConfigListOptions{}) - assert.EqualError(t, err, `"config list" requires API version 1.30, but the Docker daemon API version is 1.29`) + assert.Check(t, is.Error(err, `"config list" requires API version 1.30, but the Docker daemon API version is 1.29`)) } func TestConfigListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/config_remove_test.go b/vendor/github.com/docker/docker/client/config_remove_test.go index 290395aae..25a5c4ac8 100644 --- a/vendor/github.com/docker/docker/client/config_remove_test.go +++ b/vendor/github.com/docker/docker/client/config_remove_test.go @@ -8,7 +8,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -18,7 +19,7 @@ func TestConfigRemoveUnsupported(t *testing.T) { client: &http.Client{}, } err := client.ConfigRemove(context.Background(), "config_id") - assert.EqualError(t, err, `"config remove" requires API version 1.30, but the Docker daemon API version is 1.29`) + assert.Check(t, is.Error(err, `"config remove" 
requires API version 1.30, but the Docker daemon API version is 1.29`)) } func TestConfigRemoveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/config_update_test.go b/vendor/github.com/docker/docker/client/config_update_test.go index 99f2e173c..a7eea2f8b 100644 --- a/vendor/github.com/docker/docker/client/config_update_test.go +++ b/vendor/github.com/docker/docker/client/config_update_test.go @@ -9,7 +9,8 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -19,7 +20,7 @@ func TestConfigUpdateUnsupported(t *testing.T) { client: &http.Client{}, } err := client.ConfigUpdate(context.Background(), "config_id", swarm.Version{}, swarm.ConfigSpec{}) - assert.EqualError(t, err, `"config update" requires API version 1.30, but the Docker daemon API version is 1.29`) + assert.Check(t, is.Error(err, `"config update" requires API version 1.30, but the Docker daemon API version is 1.29`)) } func TestConfigUpdateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/container_prune_test.go b/vendor/github.com/docker/docker/client/container_prune_test.go index 1f8c22cbf..7ffd9c7ec 100644 --- a/vendor/github.com/docker/docker/client/container_prune_test.go +++ b/vendor/github.com/docker/docker/client/container_prune_test.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -24,7 +25,7 @@ func TestContainersPruneError(t *testing.T) { filters := filters.NewArgs() _, err := client.ContainersPrune(context.Background(), filters) - assert.EqualError(t, err, "Error response from daemon: Server error") + 
assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } func TestContainersPrune(t *testing.T) { @@ -99,7 +100,7 @@ func TestContainersPrune(t *testing.T) { query := req.URL.Query() for key, expected := range listCase.expectedQueryParams { actual := query.Get(key) - assert.Equal(t, expected, actual) + assert.Check(t, is.Equal(expected, actual)) } content, err := json.Marshal(types.ContainersPruneReport{ ContainersDeleted: []string{"container_id1", "container_id2"}, @@ -117,8 +118,8 @@ func TestContainersPrune(t *testing.T) { } report, err := client.ContainersPrune(context.Background(), listCase.filters) - assert.NoError(t, err) - assert.Len(t, report.ContainersDeleted, 2) - assert.Equal(t, uint64(9999), report.SpaceReclaimed) + assert.Check(t, err) + assert.Check(t, is.Len(report.ContainersDeleted, 2)) + assert.Check(t, is.Equal(uint64(9999), report.SpaceReclaimed)) } } diff --git a/vendor/github.com/docker/docker/client/container_remove_test.go b/vendor/github.com/docker/docker/client/container_remove_test.go index 0b1b64fa3..537272cd1 100644 --- a/vendor/github.com/docker/docker/client/container_remove_test.go +++ b/vendor/github.com/docker/docker/client/container_remove_test.go @@ -9,7 +9,8 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -18,7 +19,7 @@ func TestContainerRemoveError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - assert.EqualError(t, err, "Error response from daemon: Server error") + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } func TestContainerRemoveNotFoundError(t *testing.T) { @@ -26,8 +27,8 @@ func TestContainerRemoveNotFoundError(t *testing.T) { 
client: newMockClient(errorMock(http.StatusNotFound, "missing")), } err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - assert.EqualError(t, err, "Error: No such container: container_id") - assert.True(t, IsErrNotFound(err)) + assert.Check(t, is.Error(err, "Error: No such container: container_id")) + assert.Check(t, IsErrNotFound(err)) } func TestContainerRemove(t *testing.T) { @@ -61,5 +62,5 @@ func TestContainerRemove(t *testing.T) { RemoveVolumes: true, Force: true, }) - assert.NoError(t, err) + assert.Check(t, err) } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect_test.go b/vendor/github.com/docker/docker/client/distribution_inspect_test.go index 90b35a285..d4124bfa1 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect_test.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect_test.go @@ -4,8 +4,9 @@ import ( "net/http" "testing" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -15,7 +16,7 @@ func TestDistributionInspectUnsupported(t *testing.T) { client: &http.Client{}, } _, err := client.DistributionInspect(context.Background(), "foobar:1.0", "") - assert.EqualError(t, err, `"distribution inspect" requires API version 1.30, but the Docker daemon API version is 1.29`) + assert.Check(t, is.Error(err, `"distribution inspect" requires API version 1.30, but the Docker daemon API version is 1.29`)) } func TestDistributionInspectWithEmptyID(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 628adfda6..dab3a7569 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -188,8 +188,14 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) 
(net.Conn, e c, br := clientconn.Hijack() if br.Buffered() > 0 { - // If there is buffered content, wrap the connection - c = &hijackedConn{c, br} + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } } else { br.Reset(nil) } @@ -197,6 +203,10 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, e return c, nil } +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. type hijackedConn struct { net.Conn r *bufio.Reader @@ -205,3 +215,18 @@ type hijackedConn struct { func (c *hijackedConn) Read(b []byte) (int, error) { return c.r.Read(b) } + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
+type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() +} diff --git a/vendor/github.com/docker/docker/client/image_prune_test.go b/vendor/github.com/docker/docker/client/image_prune_test.go index f165e5c74..9b161531f 100644 --- a/vendor/github.com/docker/docker/client/image_prune_test.go +++ b/vendor/github.com/docker/docker/client/image_prune_test.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -24,7 +25,7 @@ func TestImagesPruneError(t *testing.T) { filters := filters.NewArgs() _, err := client.ImagesPrune(context.Background(), filters) - assert.EqualError(t, err, "Error response from daemon: Server error") + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } func TestImagesPrune(t *testing.T) { @@ -87,7 +88,7 @@ func TestImagesPrune(t *testing.T) { query := req.URL.Query() for key, expected := range listCase.expectedQueryParams { actual := query.Get(key) - assert.Equal(t, expected, actual) + assert.Check(t, is.Equal(expected, actual)) } content, err := json.Marshal(types.ImagesPruneReport{ ImagesDeleted: []types.ImageDeleteResponseItem{ @@ -112,8 +113,8 @@ func TestImagesPrune(t *testing.T) { } report, err := client.ImagesPrune(context.Background(), listCase.filters) - assert.NoError(t, err) - assert.Len(t, report.ImagesDeleted, 2) - assert.Equal(t, uint64(9999), report.SpaceReclaimed) + assert.Check(t, err) + assert.Check(t, is.Len(report.ImagesDeleted, 2)) + assert.Check(t, is.Equal(uint64(9999), report.SpaceReclaimed)) } } diff --git a/vendor/github.com/docker/docker/client/image_remove_test.go 
b/vendor/github.com/docker/docker/client/image_remove_test.go index 8f5aa0120..a1686e649 100644 --- a/vendor/github.com/docker/docker/client/image_remove_test.go +++ b/vendor/github.com/docker/docker/client/image_remove_test.go @@ -10,7 +10,8 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -20,7 +21,7 @@ func TestImageRemoveError(t *testing.T) { } _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) - assert.EqualError(t, err, "Error response from daemon: Server error") + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } func TestImageRemoveImageNotFound(t *testing.T) { @@ -29,8 +30,8 @@ func TestImageRemoveImageNotFound(t *testing.T) { } _, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{}) - assert.EqualError(t, err, "Error: No such image: unknown") - assert.True(t, IsErrNotFound(err)) + assert.Check(t, is.Error(err, "Error: No such image: unknown")) + assert.Check(t, IsErrNotFound(err)) } func TestImageRemove(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/network_inspect_test.go b/vendor/github.com/docker/docker/client/network_inspect_test.go index 703161191..8778021ed 100644 --- a/vendor/github.com/docker/docker/client/network_inspect_test.go +++ b/vendor/github.com/docker/docker/client/network_inspect_test.go @@ -11,8 +11,9 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -22,7 +23,7 @@ func TestNetworkInspectError(t *testing.T) { } _, err := client.NetworkInspect(context.Background(), "nothing", 
types.NetworkInspectOptions{}) - assert.EqualError(t, err, "Error response from daemon: Server error") + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } func TestNetworkInspectNotFoundError(t *testing.T) { @@ -31,8 +32,8 @@ func TestNetworkInspectNotFoundError(t *testing.T) { } _, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{}) - assert.EqualError(t, err, "Error: No such network: unknown") - assert.True(t, IsErrNotFound(err)) + assert.Check(t, is.Error(err, "Error: No such network: unknown")) + assert.Check(t, IsErrNotFound(err)) } func TestNetworkInspectWithEmptyID(t *testing.T) { @@ -113,5 +114,5 @@ func TestNetworkInspect(t *testing.T) { } _, err = client.NetworkInspect(context.Background(), "network_id", types.NetworkInspectOptions{Scope: "global"}) - assert.EqualError(t, err, "Error: No such network: network_id") + assert.Check(t, is.Error(err, "Error: No such network: network_id")) } diff --git a/vendor/github.com/docker/docker/client/network_prune_test.go b/vendor/github.com/docker/docker/client/network_prune_test.go index 8bba3751e..85908f0cf 100644 --- a/vendor/github.com/docker/docker/client/network_prune_test.go +++ b/vendor/github.com/docker/docker/client/network_prune_test.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -89,7 +90,7 @@ func TestNetworksPrune(t *testing.T) { query := req.URL.Query() for key, expected := range listCase.expectedQueryParams { actual := query.Get(key) - assert.Equal(t, expected, actual) + assert.Check(t, is.Equal(expected, actual)) } content, err := json.Marshal(types.NetworksPruneReport{ NetworksDeleted: []string{"network_id1", "network_id2"}, @@ -106,7 +107,7 @@ func TestNetworksPrune(t *testing.T) { } 
report, err := client.NetworksPrune(context.Background(), listCase.filters) - assert.NoError(t, err) - assert.Len(t, report.NetworksDeleted, 2) + assert.Check(t, err) + assert.Check(t, is.Len(report.NetworksDeleted, 2)) } } diff --git a/vendor/github.com/docker/docker/client/ping_test.go b/vendor/github.com/docker/docker/client/ping_test.go index 69ff86269..f83233ace 100644 --- a/vendor/github.com/docker/docker/client/ping_test.go +++ b/vendor/github.com/docker/docker/client/ping_test.go @@ -7,7 +7,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -31,15 +32,15 @@ func TestPingFail(t *testing.T) { } ping, err := client.Ping(context.Background()) - assert.Error(t, err) - assert.Equal(t, false, ping.Experimental) - assert.Equal(t, "", ping.APIVersion) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(false, ping.Experimental)) + assert.Check(t, is.Equal("", ping.APIVersion)) withHeader = true ping2, err := client.Ping(context.Background()) - assert.Error(t, err) - assert.Equal(t, true, ping2.Experimental) - assert.Equal(t, "awesome", ping2.APIVersion) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(true, ping2.Experimental)) + assert.Check(t, is.Equal("awesome", ping2.APIVersion)) } // TestPingWithError tests the case where there is a protocol error in the ping. 
@@ -57,9 +58,9 @@ func TestPingWithError(t *testing.T) { } ping, err := client.Ping(context.Background()) - assert.Error(t, err) - assert.Equal(t, false, ping.Experimental) - assert.Equal(t, "", ping.APIVersion) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(false, ping.Experimental)) + assert.Check(t, is.Equal("", ping.APIVersion)) } // TestPingSuccess tests that we are able to get the expected API headers/ping @@ -76,7 +77,7 @@ func TestPingSuccess(t *testing.T) { }), } ping, err := client.Ping(context.Background()) - assert.Error(t, err) - assert.Equal(t, true, ping.Experimental) - assert.Equal(t, "awesome", ping.APIVersion) + assert.Check(t, is.ErrorContains(err, "")) + assert.Check(t, is.Equal(true, ping.Experimental)) + assert.Check(t, is.Equal("awesome", ping.APIVersion)) } diff --git a/vendor/github.com/docker/docker/client/request_test.go b/vendor/github.com/docker/docker/client/request_test.go index 1dbfed62c..1a0a87e2f 100644 --- a/vendor/github.com/docker/docker/client/request_test.go +++ b/vendor/github.com/docker/docker/client/request_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" "golang.org/x/net/context" ) @@ -46,7 +46,7 @@ func TestSetHostHeader(t *testing.T) { for c, test := range testCases { hostURL, err := ParseHostURL(test.host) - require.NoError(t, err) + assert.NilError(t, err) client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, error) { @@ -71,7 +71,7 @@ func TestSetHostHeader(t *testing.T) { } _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) - require.NoError(t, err) + assert.NilError(t, err) } } diff --git a/vendor/github.com/docker/docker/client/secret_create_test.go b/vendor/github.com/docker/docker/client/secret_create_test.go index 7d54e1aeb..b31cab509 100644 --- a/vendor/github.com/docker/docker/client/secret_create_test.go 
+++ b/vendor/github.com/docker/docker/client/secret_create_test.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -21,7 +22,7 @@ func TestSecretCreateUnsupported(t *testing.T) { client: &http.Client{}, } _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) - assert.EqualError(t, err, `"secret create" requires API version 1.25, but the Docker daemon API version is 1.24`) + assert.Check(t, is.Error(err, `"secret create" requires API version 1.25, but the Docker daemon API version is 1.24`)) } func TestSecretCreateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/secret_inspect_test.go b/vendor/github.com/docker/docker/client/secret_inspect_test.go index eb63162c1..0bb3ae24c 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect_test.go +++ b/vendor/github.com/docker/docker/client/secret_inspect_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -21,7 +22,7 @@ func TestSecretInspectUnsupported(t *testing.T) { client: &http.Client{}, } _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") - assert.EqualError(t, err, `"secret inspect" requires API version 1.25, but the Docker daemon API version is 1.24`) + assert.Check(t, is.Error(err, `"secret inspect" requires API version 1.25, but the Docker daemon API version is 1.24`)) } func TestSecretInspectError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/secret_list_test.go b/vendor/github.com/docker/docker/client/secret_list_test.go index 
2bd427898..36d8e8e2f 100644 --- a/vendor/github.com/docker/docker/client/secret_list_test.go +++ b/vendor/github.com/docker/docker/client/secret_list_test.go @@ -12,7 +12,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -22,7 +23,7 @@ func TestSecretListUnsupported(t *testing.T) { client: &http.Client{}, } _, err := client.SecretList(context.Background(), types.SecretListOptions{}) - assert.EqualError(t, err, `"secret list" requires API version 1.25, but the Docker daemon API version is 1.24`) + assert.Check(t, is.Error(err, `"secret list" requires API version 1.25, but the Docker daemon API version is 1.24`)) } func TestSecretListError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/secret_remove_test.go b/vendor/github.com/docker/docker/client/secret_remove_test.go index 44cc0cbcc..37c22650d 100644 --- a/vendor/github.com/docker/docker/client/secret_remove_test.go +++ b/vendor/github.com/docker/docker/client/secret_remove_test.go @@ -8,7 +8,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -18,7 +19,7 @@ func TestSecretRemoveUnsupported(t *testing.T) { client: &http.Client{}, } err := client.SecretRemove(context.Background(), "secret_id") - assert.EqualError(t, err, `"secret remove" requires API version 1.25, but the Docker daemon API version is 1.24`) + assert.Check(t, is.Error(err, `"secret remove" requires API version 1.25, but the Docker daemon API version is 1.24`)) } func TestSecretRemoveError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/secret_update_test.go 
b/vendor/github.com/docker/docker/client/secret_update_test.go index d2fca4b2d..3ff172ba5 100644 --- a/vendor/github.com/docker/docker/client/secret_update_test.go +++ b/vendor/github.com/docker/docker/client/secret_update_test.go @@ -9,7 +9,8 @@ import ( "testing" "github.com/docker/docker/api/types/swarm" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -19,7 +20,7 @@ func TestSecretUpdateUnsupported(t *testing.T) { client: &http.Client{}, } err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) - assert.EqualError(t, err, `"secret update" requires API version 1.25, but the Docker daemon API version is 1.24`) + assert.Check(t, is.Error(err, `"secret update" requires API version 1.25, but the Docker daemon API version is 1.24`)) } func TestSecretUpdateError(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/service_create_test.go b/vendor/github.com/docker/docker/client/service_create_test.go index 9e859b18a..c5d8ae4ff 100644 --- a/vendor/github.com/docker/docker/client/service_create_test.go +++ b/vendor/github.com/docker/docker/client/service_create_test.go @@ -12,9 +12,10 @@ import ( "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -73,8 +74,8 @@ func TestServiceCreateCompatiblePlatforms(t *testing.T) { return nil, err } - assert.Equal(t, "foobar:1.0@sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", serviceSpec.TaskTemplate.ContainerSpec.Image) - assert.Len(t, 
serviceSpec.TaskTemplate.Placement.Platforms, 1) + assert.Check(t, is.Equal("foobar:1.0@sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96", serviceSpec.TaskTemplate.ContainerSpec.Image)) + assert.Check(t, is.Len(serviceSpec.TaskTemplate.Placement.Platforms, 1)) p := serviceSpec.TaskTemplate.Placement.Platforms[0] b, err := json.Marshal(types.ServiceCreateResponse{ @@ -115,8 +116,8 @@ func TestServiceCreateCompatiblePlatforms(t *testing.T) { spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{Image: "foobar:1.0"}}} r, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{QueryRegistry: true}) - assert.NoError(t, err) - assert.Equal(t, "service_linux_amd64", r.ID) + assert.Check(t, err) + assert.Check(t, is.Equal("service_linux_amd64", r.ID)) } func TestServiceCreateDigestPinning(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/service_remove_test.go b/vendor/github.com/docker/docker/client/service_remove_test.go index 0909c9e06..9198763f8 100644 --- a/vendor/github.com/docker/docker/client/service_remove_test.go +++ b/vendor/github.com/docker/docker/client/service_remove_test.go @@ -8,7 +8,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -18,7 +19,7 @@ func TestServiceRemoveError(t *testing.T) { } err := client.ServiceRemove(context.Background(), "service_id") - assert.EqualError(t, err, "Error response from daemon: Server error") + assert.Check(t, is.Error(err, "Error response from daemon: Server error")) } func TestServiceRemoveNotFoundError(t *testing.T) { @@ -27,8 +28,8 @@ func TestServiceRemoveNotFoundError(t *testing.T) { } err := client.ServiceRemove(context.Background(), "service_id") - assert.EqualError(t, err, "Error: No such service: service_id") - assert.True(t, IsErrNotFound(err)) + 
assert.Check(t, is.Error(err, "Error: No such service: service_id")) + assert.Check(t, IsErrNotFound(err)) } func TestServiceRemove(t *testing.T) { diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go index c4ac70738..aff79440f 100644 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key_test.go @@ -11,8 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -55,6 +55,6 @@ func TestSwarmGetUnlockKey(t *testing.T) { } resp, err := client.SwarmGetUnlockKey(context.Background()) - require.NoError(t, err) - assert.Equal(t, unlockKey, resp.UnlockKey) + assert.NilError(t, err) + assert.Check(t, is.Equal(unlockKey, resp.UnlockKey)) } diff --git a/vendor/github.com/docker/docker/client/volume_inspect_test.go b/vendor/github.com/docker/docker/client/volume_inspect_test.go index c97f5c721..4a2cf7c7d 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect_test.go +++ b/vendor/github.com/docker/docker/client/volume_inspect_test.go @@ -11,9 +11,9 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -32,7 +32,7 @@ func TestVolumeInspectNotFound(t *testing.T) { } _, err := client.VolumeInspect(context.Background(), "unknown") - assert.True(t, IsErrNotFound(err)) + assert.Check(t, IsErrNotFound(err)) } func TestVolumeInspectWithEmptyID(t 
*testing.T) { @@ -75,6 +75,6 @@ func TestVolumeInspect(t *testing.T) { } volume, err := client.VolumeInspect(context.Background(), "volume_id") - require.NoError(t, err) - assert.Equal(t, expected, volume) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, volume)) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go b/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go index 2705d671b..eaa53d8f6 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/config_unix_test.go @@ -6,8 +6,9 @@ import ( "testing" "github.com/docker/docker/daemon/config" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" ) func TestDaemonParseShmSize(t *testing.T) { @@ -16,7 +17,7 @@ func TestDaemonParseShmSize(t *testing.T) { conf := &config.Config{} installConfigFlags(conf, flags) // By default `--default-shm-size=64M` - assert.Equal(t, int64(64*1024*1024), conf.ShmSize.Value()) - assert.NoError(t, flags.Set("default-shm-size", "128M")) - assert.Equal(t, int64(128*1024*1024), conf.ShmSize.Value()) + assert.Check(t, is.Equal(int64(64*1024*1024), conf.ShmSize.Value())) + assert.Check(t, flags.Set("default-shm-size", "128M")) + assert.Check(t, is.Equal(int64(128*1024*1024), conf.ShmSize.Value())) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go index b06583187..e5c2c2ec7 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_test.go @@ -5,11 +5,11 @@ import ( "github.com/docker/docker/daemon/config" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" 
"github.com/gotestyourself/gotestyourself/fs" "github.com/sirupsen/logrus" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func defaultOptions(configFile string) *daemonOptions { @@ -27,8 +27,8 @@ func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { opts.Debug = true loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) if !loadedConfig.Debug { t.Fatalf("expected debug to be copied from the common flags, got false") } @@ -40,9 +40,9 @@ func TestLoadDaemonCliConfigWithTLS(t *testing.T) { opts.TLS = true loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.Equal(t, "/tmp/ca.pem", loadedConfig.CommonTLSOptions.CAFile) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal("/tmp/ca.pem", loadedConfig.CommonTLSOptions.CAFile)) } func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { @@ -53,9 +53,9 @@ func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { opts := defaultOptions(configFile) flags := opts.flags - assert.NoError(t, flags.Set("config-file", configFile)) - assert.NoError(t, flags.Set("label", "l1=bar")) - assert.NoError(t, flags.Set("label", "l2=baz")) + assert.Check(t, flags.Set("config-file", configFile)) + assert.Check(t, flags.Set("label", "l1=bar")) + assert.Check(t, flags.Set("label", "l2=baz")) _, err := loadDaemonCliConfig(opts) testutil.ErrorContains(t, err, "as a flag and in the configuration file: labels") @@ -69,9 +69,9 @@ func TestLoadDaemonCliWithConflictingNodeGenericResources(t *testing.T) { opts := defaultOptions(configFile) flags := opts.flags - assert.NoError(t, flags.Set("config-file", configFile)) - assert.NoError(t, flags.Set("node-generic-resource", "r1=bar")) - assert.NoError(t, flags.Set("node-generic-resource", "r2=baz")) + assert.Check(t, 
flags.Set("config-file", configFile)) + assert.Check(t, flags.Set("node-generic-resource", "r1=bar")) + assert.Check(t, flags.Set("node-generic-resource", "r2=baz")) _, err := loadDaemonCliConfig(opts) testutil.ErrorContains(t, err, "as a flag and in the configuration file: node-generic-resources") @@ -81,22 +81,22 @@ func TestLoadDaemonCliWithConflictingLabels(t *testing.T) { opts := defaultOptions("") flags := opts.flags - assert.NoError(t, flags.Set("label", "foo=bar")) - assert.NoError(t, flags.Set("label", "foo=baz")) + assert.Check(t, flags.Set("label", "foo=bar")) + assert.Check(t, flags.Set("label", "foo=baz")) _, err := loadDaemonCliConfig(opts) - assert.EqualError(t, err, "conflict labels for foo=baz and foo=bar") + assert.Check(t, is.Error(err, "conflict labels for foo=baz and foo=bar")) } func TestLoadDaemonCliWithDuplicateLabels(t *testing.T) { opts := defaultOptions("") flags := opts.flags - assert.NoError(t, flags.Set("label", "foo=the-same")) - assert.NoError(t, flags.Set("label", "foo=the-same")) + assert.Check(t, flags.Set("label", "foo=the-same")) + assert.Check(t, flags.Set("label", "foo=the-same")) _, err := loadDaemonCliConfig(opts) - assert.NoError(t, err) + assert.Check(t, err) } func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { @@ -107,9 +107,9 @@ func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { opts.TLSOptions.CAFile = "/tmp/ca.pem" loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.Equal(t, loadedConfig.TLS, true) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal(loadedConfig.TLS, true)) } func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { @@ -120,9 +120,9 @@ func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { opts.TLSOptions.CAFile = "/tmp/ca.pem" loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.True(t, 
loadedConfig.TLS) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, loadedConfig.TLS) } func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { @@ -133,9 +133,9 @@ func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { opts.TLSOptions.CAFile = "/tmp/ca.pem" loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.False(t, loadedConfig.TLS) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, !loadedConfig.TLS) } func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { @@ -144,10 +144,10 @@ func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.Equal(t, "warn", loadedConfig.LogLevel) - assert.Equal(t, logrus.WarnLevel, logrus.GetLevel()) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal("warn", loadedConfig.LogLevel)) + assert.Check(t, is.Equal(logrus.WarnLevel, logrus.GetLevel())) } func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { @@ -157,10 +157,10 @@ func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.Equal(t, "/etc/certs/ca.pem", loadedConfig.CommonTLSOptions.CAFile) - assert.Equal(t, "syslog", loadedConfig.LogConfig.Type) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, is.Equal("/etc/certs/ca.pem", loadedConfig.CommonTLSOptions.CAFile)) + assert.Check(t, is.Equal("syslog", loadedConfig.LogConfig.Type)) } func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { @@ -174,10 +174,10 @@ func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := 
loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) - assert.Len(t, loadedConfig.AllowNondistributableArtifacts, 1) - assert.Len(t, loadedConfig.Mirrors, 1) - assert.Len(t, loadedConfig.InsecureRegistries, 1) + assert.Check(t, is.Len(loadedConfig.AllowNondistributableArtifacts, 1)) + assert.Check(t, is.Len(loadedConfig.Mirrors, 1)) + assert.Check(t, is.Len(loadedConfig.InsecureRegistries, 1)) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go index a65d8ed01..6ab2ada48 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go @@ -104,10 +104,6 @@ func allocateDaemonPort(addr string) error { return nil } -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { -} - func wrapListeners(proto string, ls []net.Listener) []net.Listener { switch proto { case "unix": diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go index 41c392e1b..39ff1e682 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix_test.go @@ -6,9 +6,9 @@ import ( "testing" "github.com/docker/docker/daemon/config" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { @@ -19,17 +19,17 @@ func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { opts := defaultOptions(tempFile.Path()) opts.Debug = true opts.LogLevel = "info" - assert.NoError(t, 
opts.flags.Set("selinux-enabled", "true")) + assert.Check(t, opts.flags.Set("selinux-enabled", "true")) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) - assert.True(t, loadedConfig.Debug) - assert.Equal(t, "info", loadedConfig.LogLevel) - assert.True(t, loadedConfig.EnableSelinuxSupport) - assert.Equal(t, "json-file", loadedConfig.LogConfig.Type) - assert.Equal(t, "1k", loadedConfig.LogConfig.Config["max-size"]) + assert.Check(t, loadedConfig.Debug) + assert.Check(t, is.Equal("info", loadedConfig.LogLevel)) + assert.Check(t, loadedConfig.EnableSelinuxSupport) + assert.Check(t, is.Equal("json-file", loadedConfig.LogConfig.Type)) + assert.Check(t, is.Equal("1k", loadedConfig.LogConfig.Config["max-size"])) } func TestLoadDaemonConfigWithNetwork(t *testing.T) { @@ -39,11 +39,11 @@ func TestLoadDaemonConfigWithNetwork(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) - assert.Equal(t, "127.0.0.2", loadedConfig.IP) - assert.Equal(t, "127.0.0.1", loadedConfig.DefaultIP.String()) + assert.Check(t, is.Equal("127.0.0.2", loadedConfig.IP)) + assert.Check(t, is.Equal("127.0.0.1", loadedConfig.DefaultIP.String())) } func TestLoadDaemonConfigWithMapOptions(t *testing.T) { @@ -56,14 +56,14 @@ func TestLoadDaemonConfigWithMapOptions(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) - assert.NotNil(t, loadedConfig.ClusterOpts) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) + assert.Check(t, loadedConfig.ClusterOpts != nil) expectedPath := "/var/lib/docker/discovery_certs/ca.pem" - assert.Equal(t, expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"]) 
- assert.NotNil(t, loadedConfig.LogConfig.Config) - assert.Equal(t, "test", loadedConfig.LogConfig.Config["tag"]) + assert.Check(t, is.Equal(expectedPath, loadedConfig.ClusterOpts["kv.cacertfile"])) + assert.Check(t, loadedConfig.LogConfig.Config != nil) + assert.Check(t, is.Equal("test", loadedConfig.LogConfig.Config["tag"])) } func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { @@ -73,17 +73,17 @@ func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) - assert.False(t, loadedConfig.EnableUserlandProxy) + assert.Check(t, !loadedConfig.EnableUserlandProxy) // make sure reloading doesn't generate configuration // conflicts after normalizing boolean values. reload := func(reloadedConfig *config.Config) { - assert.False(t, reloadedConfig.EnableUserlandProxy) + assert.Check(t, !reloadedConfig.EnableUserlandProxy) } - assert.NoError(t, config.Reload(opts.configFile, opts.flags, reload)) + assert.Check(t, config.Reload(opts.configFile, opts.flags, reload)) } func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { @@ -92,8 +92,8 @@ func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { opts := defaultOptions(tempFile.Path()) loadedConfig, err := loadDaemonCliConfig(opts) - require.NoError(t, err) - require.NotNil(t, loadedConfig) + assert.NilError(t, err) + assert.Assert(t, loadedConfig != nil) - assert.True(t, loadedConfig.EnableUserlandProxy) + assert.Check(t, loadedConfig.EnableUserlandProxy) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker.go b/vendor/github.com/docker/docker/cmd/dockerd/docker.go index 2ccca46c1..e90a12e36 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker.go @@ -3,7 +3,6 @@ package main import 
( "fmt" "os" - "path/filepath" "runtime" "github.com/docker/docker/cli" @@ -25,6 +24,10 @@ func newDaemonCommand() *cobra.Command { SilenceErrors: true, Args: cli.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { + if opts.version { + showVersion() + return nil + } opts.flags = cmd.Flags() return runDaemon(opts) }, @@ -41,45 +44,6 @@ func newDaemonCommand() *cobra.Command { return cmd } -func runDaemon(opts *daemonOptions) error { - if opts.version { - showVersion() - return nil - } - - daemonCli := NewDaemonCli() - - // Windows specific settings as these are not defaulted. - if runtime.GOOS == "windows" { - if opts.daemonConfig.Pidfile == "" { - opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") - } - if opts.configFile == "" { - opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) - } - } - - // On Windows, this may be launching as a service or with an option to - // register the service. - stop, runAsService, err := initService(daemonCli) - if err != nil { - logrus.Fatal(err) - } - - if stop { - return nil - } - - // If Windows SCM manages the service - no need for PID files - if runAsService { - opts.daemonConfig.Pidfile = "" - } - - err = daemonCli.start(opts) - notifyShutdown(err) - return err -} - func showVersion() { fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go new file mode 100644 index 000000000..0dec48663 --- /dev/null +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package main + +func runDaemon(opts *daemonOptions) error { + daemonCli := NewDaemonCli() + return daemonCli.start(opts) +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go index 889e35272..bd8bc5a58 
100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go @@ -1,5 +1,38 @@ package main import ( + "path/filepath" + _ "github.com/docker/docker/autogen/winresources/dockerd" + "github.com/sirupsen/logrus" ) + +func runDaemon(opts *daemonOptions) error { + daemonCli := NewDaemonCli() + + // On Windows, this may be launching as a service or with an option to + // register the service. + stop, runAsService, err := initService(daemonCli) + if err != nil { + logrus.Fatal(err) + } + + if stop { + return nil + } + + // Windows specific settings as these are not defaulted. + if opts.configFile == "" { + opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) + } + if runAsService { + // If Windows SCM manages the service - no need for PID files + opts.daemonConfig.Pidfile = "" + } else if opts.daemonConfig.Pidfile == "" { + opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") + } + + err = daemonCli.start(opts) + notifyShutdown(err) + return err +} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/options_test.go b/vendor/github.com/docker/docker/cmd/dockerd/options_test.go index c3298a0ac..2a4e63b6b 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/options_test.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/options_test.go @@ -6,8 +6,9 @@ import ( cliconfig "github.com/docker/docker/cli/config" "github.com/docker/docker/daemon/config" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" ) func TestCommonOptionsInstallFlags(t *testing.T) { @@ -20,10 +21,10 @@ func TestCommonOptionsInstallFlags(t *testing.T) { "--tlscert=\"/foo/cert\"", "--tlskey=\"/foo/key\"", }) - assert.NoError(t, err) - assert.Equal(t, "/foo/cafile", opts.TLSOptions.CAFile) - assert.Equal(t, "/foo/cert", opts.TLSOptions.CertFile) - 
assert.Equal(t, opts.TLSOptions.KeyFile, "/foo/key") + assert.Check(t, err) + assert.Check(t, is.Equal("/foo/cafile", opts.TLSOptions.CAFile)) + assert.Check(t, is.Equal("/foo/cert", opts.TLSOptions.CertFile)) + assert.Check(t, is.Equal(opts.TLSOptions.KeyFile, "/foo/key")) } func defaultPath(filename string) string { @@ -36,8 +37,8 @@ func TestCommonOptionsInstallFlagsWithDefaults(t *testing.T) { opts.InstallFlags(flags) err := flags.Parse([]string{}) - assert.NoError(t, err) - assert.Equal(t, defaultPath("ca.pem"), opts.TLSOptions.CAFile) - assert.Equal(t, defaultPath("cert.pem"), opts.TLSOptions.CertFile) - assert.Equal(t, defaultPath("key.pem"), opts.TLSOptions.KeyFile) + assert.Check(t, err) + assert.Check(t, is.Equal(defaultPath("ca.pem"), opts.TLSOptions.CAFile)) + assert.Check(t, is.Equal(defaultPath("cert.pem"), opts.TLSOptions.CertFile)) + assert.Check(t, is.Equal(defaultPath("key.pem"), opts.TLSOptions.KeyFile)) } diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go index e67ad474b..bbcb7f3f3 100644 --- a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go +++ b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go @@ -6,9 +6,5 @@ import ( "github.com/spf13/pflag" ) -func initService(daemonCli *DaemonCli) (bool, bool, error) { - return false, false, nil -} - func installServiceFlags(flags *pflag.FlagSet) { } diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go index 960d7bf61..ed72c4a40 100644 --- a/vendor/github.com/docker/docker/container/archive.go +++ b/vendor/github.com/docker/docker/container/archive.go @@ -6,6 +6,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" ) // ResolvePath resolves the given path in the container to a resource on the @@ -13,6 +14,9 @@ 
import ( // the absolute path to the resource relative to the container's rootfs, and // an error if the path points to outside the container's rootfs. func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + if container.BaseFS == nil { + return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } // Check if a drive letter supplied, it must be the system drive. No-op except on Windows path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS) if err != nil { @@ -45,6 +49,9 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri // resolved to a path on the host corresponding to the given absolute path // inside the container. func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + if container.BaseFS == nil { + return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil") + } driver := container.BaseFS lstat, err := driver.Lstat(resolvedPath) diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go index 461139b43..a076e8074 100644 --- a/vendor/github.com/docker/docker/container/container.go +++ b/vendor/github.com/docker/docker/container/container.go @@ -311,6 +311,9 @@ func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error // symlinking to a different path) between using this method and using the // path. See symlink.FollowSymlinkInScope for more details. func (container *Container) GetResourcePath(path string) (string, error) { + if container.BaseFS == nil { + return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS agnostic way. 
r, e := container.BaseFS.ResolveScopedPath(path, false) diff --git a/vendor/github.com/docker/docker/container/container_unit_test.go b/vendor/github.com/docker/docker/container/container_unit_test.go index 863a47a1f..bf45df942 100644 --- a/vendor/github.com/docker/docker/container/container_unit_test.go +++ b/vendor/github.com/docker/docker/container/container_unit_test.go @@ -11,7 +11,7 @@ import ( swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/pkg/signal" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestContainerStopSignal(t *testing.T) { @@ -74,7 +74,7 @@ func TestContainerSecretReferenceDestTarget(t *testing.T) { func TestContainerLogPathSetForJSONFileLogger(t *testing.T) { containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForJSONFileLogger") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(containerRoot) c := &Container{ @@ -89,17 +89,17 @@ func TestContainerLogPathSetForJSONFileLogger(t *testing.T) { } logger, err := c.StartLogger() - require.NoError(t, err) + assert.NilError(t, err) defer logger.Close() expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID))) - require.NoError(t, err) - require.Equal(t, c.LogPath, expectedLogPath) + assert.NilError(t, err) + assert.Equal(t, c.LogPath, expectedLogPath) } func TestContainerLogPathSetForRingLogger(t *testing.T) { containerRoot, err := ioutil.TempDir("", "TestContainerLogPathSetForRingLogger") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(containerRoot) c := &Container{ @@ -117,10 +117,10 @@ func TestContainerLogPathSetForRingLogger(t *testing.T) { } logger, err := c.StartLogger() - require.NoError(t, err) + assert.NilError(t, err) defer logger.Close() expectedLogPath, err := filepath.Abs(filepath.Join(containerRoot, fmt.Sprintf("%s-json.log", c.ID))) - 
require.NoError(t, err) - require.Equal(t, c.LogPath, expectedLogPath) + assert.NilError(t, err) + assert.Equal(t, c.LogPath, expectedLogPath) } diff --git a/vendor/github.com/docker/docker/container/view_test.go b/vendor/github.com/docker/docker/container/view_test.go index 26803b04e..a872dffea 100644 --- a/vendor/github.com/docker/docker/container/view_test.go +++ b/vendor/github.com/docker/docker/container/view_test.go @@ -8,8 +8,9 @@ import ( "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pborman/uuid" - "github.com/stretchr/testify/assert" ) var root string @@ -109,56 +110,56 @@ func TestNames(t *testing.T) { if err != nil { t.Fatal(err) } - assert.NoError(t, db.ReserveName("name1", "containerid1")) - assert.NoError(t, db.ReserveName("name1", "containerid1")) // idempotent - assert.NoError(t, db.ReserveName("name2", "containerid2")) - assert.EqualError(t, db.ReserveName("name2", "containerid3"), ErrNameReserved.Error()) + assert.Check(t, db.ReserveName("name1", "containerid1")) + assert.Check(t, db.ReserveName("name1", "containerid1")) // idempotent + assert.Check(t, db.ReserveName("name2", "containerid2")) + assert.Check(t, is.Error(db.ReserveName("name2", "containerid3"), ErrNameReserved.Error())) // Releasing a name allows the name to point to something else later. 
- assert.NoError(t, db.ReleaseName("name2")) - assert.NoError(t, db.ReserveName("name2", "containerid3")) + assert.Check(t, db.ReleaseName("name2")) + assert.Check(t, db.ReserveName("name2", "containerid3")) view := db.Snapshot() id, err := view.GetID("name1") - assert.NoError(t, err) - assert.Equal(t, "containerid1", id) + assert.Check(t, err) + assert.Check(t, is.Equal("containerid1", id)) id, err = view.GetID("name2") - assert.NoError(t, err) - assert.Equal(t, "containerid3", id) + assert.Check(t, err) + assert.Check(t, is.Equal("containerid3", id)) _, err = view.GetID("notreserved") - assert.EqualError(t, err, ErrNameNotReserved.Error()) + assert.Check(t, is.Error(err, ErrNameNotReserved.Error())) // Releasing and re-reserving a name doesn't affect the snapshot. - assert.NoError(t, db.ReleaseName("name2")) - assert.NoError(t, db.ReserveName("name2", "containerid4")) + assert.Check(t, db.ReleaseName("name2")) + assert.Check(t, db.ReserveName("name2", "containerid4")) id, err = view.GetID("name1") - assert.NoError(t, err) - assert.Equal(t, "containerid1", id) + assert.Check(t, err) + assert.Check(t, is.Equal("containerid1", id)) id, err = view.GetID("name2") - assert.NoError(t, err) - assert.Equal(t, "containerid3", id) + assert.Check(t, err) + assert.Check(t, is.Equal("containerid3", id)) // GetAllNames - assert.Equal(t, map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames()) + assert.Check(t, is.DeepEqual(map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames())) - assert.NoError(t, db.ReserveName("name3", "containerid1")) - assert.NoError(t, db.ReserveName("name4", "containerid1")) + assert.Check(t, db.ReserveName("name3", "containerid1")) + assert.Check(t, db.ReserveName("name4", "containerid1")) view = db.Snapshot() - assert.Equal(t, map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames()) + assert.Check(t, 
is.DeepEqual(map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames())) // Release containerid1's names with Delete even though no container exists - assert.NoError(t, db.Delete(&Container{ID: "containerid1"})) + assert.Check(t, db.Delete(&Container{ID: "containerid1"})) // Reusing one of those names should work - assert.NoError(t, db.ReserveName("name1", "containerid4")) + assert.Check(t, db.ReserveName("name1", "containerid4")) view = db.Snapshot() - assert.Equal(t, map[string][]string{"containerid4": {"name1", "name2"}}, view.GetAllNames()) + assert.Check(t, is.DeepEqual(map[string][]string{"containerid4": {"name1", "name2"}}, view.GetAllNames())) } // Test case for GitHub issue 35920 diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go b/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go index 347aa028b..0794af99a 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go +++ b/vendor/github.com/docker/docker/daemon/cluster/convert/service_test.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm/runtime" swarmapi "github.com/docker/swarmkit/api" google_protobuf3 "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) { @@ -178,12 +178,12 @@ func TestServiceConvertToGRPCIsolation(t *testing.T) { }, } res, err := ServiceSpecToGRPC(s) - require.NoError(t, err) + assert.NilError(t, err) v, ok := res.Task.Runtime.(*swarmapi.TaskSpec_Container) if !ok { t.Fatal("expected type swarmapi.TaskSpec_Container") } - require.Equal(t, c.to, v.Container.Isolation) + assert.Equal(t, c.to, v.Container.Isolation) }) } } @@ -228,7 +228,7 @@ func TestServiceConvertFromGRPCIsolation(t *testing.T) { t.Fatal(err) } - require.Equal(t, c.to, svc.Spec.TaskTemplate.ContainerSpec.Isolation) + assert.Equal(t, 
c.to, svc.Spec.TaskTemplate.ContainerSpec.Isolation) }) } } diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go index 456cd403b..1e9417197 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go +++ b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container_test.go @@ -5,7 +5,7 @@ import ( container "github.com/docker/docker/api/types/container" swarmapi "github.com/docker/swarmkit/api" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestIsolationConversion(t *testing.T) { @@ -31,7 +31,7 @@ func TestIsolationConversion(t *testing.T) { }, } config := containerConfig{task: &task} - require.Equal(t, c.to, config.hostConfig().Isolation) + assert.Equal(t, c.to, config.hostConfig().Isolation) }) } } diff --git a/vendor/github.com/docker/docker/daemon/cluster/noderunner.go b/vendor/github.com/docker/docker/daemon/cluster/noderunner.go index b538e844e..989551a6c 100644 --- a/vendor/github.com/docker/docker/daemon/cluster/noderunner.go +++ b/vendor/github.com/docker/docker/daemon/cluster/noderunner.go @@ -124,8 +124,11 @@ func (n *nodeRunner) start(conf nodeStartConfig) error { n.cluster.config.Backend, n.cluster.config.PluginBackend, n.cluster.config.ImageBackend), - HeartbeatTick: 1, - ElectionTick: 3, + HeartbeatTick: 1, + // Recommended value in etcd/raft is 10 x (HeartbeatTick). + // Lower values were seen to have caused instability because of + // frequent leader elections when running on flakey networks. 
+ ElectionTick: 10, UnlockKey: conf.lockKey, AutoLockManagers: conf.autolock, PluginGetter: n.cluster.config.Backend.PluginGetter(), diff --git a/vendor/github.com/docker/docker/daemon/config/config_test.go b/vendor/github.com/docker/docker/daemon/config/config_test.go index 53db2922c..2fe2b3805 100644 --- a/vendor/github.com/docker/docker/daemon/config/config_test.go +++ b/vendor/github.com/docker/docker/daemon/config/config_test.go @@ -9,9 +9,10 @@ import ( "github.com/docker/docker/daemon/discovery" "github.com/docker/docker/internal/testutil" "github.com/docker/docker/opts" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" ) func TestDaemonConfigurationNotFound(t *testing.T) { @@ -59,7 +60,7 @@ func TestFindConfigurationConflicts(t *testing.T) { flags := pflag.NewFlagSet("test", pflag.ContinueOnError) flags.String("authorization-plugins", "", "") - assert.NoError(t, flags.Set("authorization-plugins", "asdf")) + assert.Check(t, flags.Set("authorization-plugins", "asdf")) testutil.ErrorContains(t, findConfigurationConflicts(config, flags), @@ -72,8 +73,8 @@ func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { var hosts []string flags.VarP(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), "host", "H", "Daemon socket(s) to connect to") - assert.NoError(t, flags.Set("host", "tcp://127.0.0.1:4444")) - assert.NoError(t, flags.Set("host", "unix:///var/run/docker.sock")) + assert.Check(t, flags.Set("host", "tcp://127.0.0.1:4444")) + assert.Check(t, flags.Set("host", "unix:///var/run/docker.sock")) testutil.ErrorContains(t, findConfigurationConflicts(config, flags), "hosts") } @@ -424,7 +425,7 @@ func TestReloadSetConfigFileNotExist(t *testing.T) { flags.Set("config-file", configFile) err := Reload(configFile, flags, func(c *Config) {}) - assert.Error(t, err) + assert.Check(t, 
is.ErrorContains(err, "")) testutil.ErrorContains(t, err, "unable to configure the Docker daemon with file") } @@ -438,8 +439,8 @@ func TestReloadDefaultConfigNotExist(t *testing.T) { err := Reload(configFile, flags, func(c *Config) { reloaded = true }) - assert.Nil(t, err) - assert.True(t, reloaded) + assert.Check(t, err) + assert.Check(t, reloaded) } // TestReloadBadDefaultConfig tests that when `--config-file` is not set @@ -457,7 +458,7 @@ func TestReloadBadDefaultConfig(t *testing.T) { flags := pflag.NewFlagSet("test", pflag.ContinueOnError) flags.String("config-file", configFile, "") err = Reload(configFile, flags, func(c *Config) {}) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) testutil.ErrorContains(t, err, "unable to configure the Docker daemon with file") } @@ -484,5 +485,5 @@ func TestReloadWithDuplicateLabels(t *testing.T) { flags.String("config-file", configFile, "") flags.StringSlice("labels", lbls, "") err := Reload(configFile, flags, func(c *Config) {}) - assert.NoError(t, err) + assert.Check(t, err) } diff --git a/vendor/github.com/docker/docker/daemon/config/config_unix_test.go b/vendor/github.com/docker/docker/daemon/config/config_unix_test.go index b4efa95e0..53eb42826 100644 --- a/vendor/github.com/docker/docker/daemon/config/config_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/config/config_unix_test.go @@ -7,10 +7,10 @@ import ( "github.com/docker/docker/opts" units "github.com/docker/go-units" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestGetConflictFreeConfiguration(t *testing.T) { @@ -39,9 +39,9 @@ func TestGetConflictFreeConfiguration(t *testing.T) { flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") cc, err := getConflictFreeConfiguration(file.Path(), flags) - 
require.NoError(t, err) + assert.NilError(t, err) - assert.True(t, cc.Debug) + assert.Check(t, cc.Debug) expectedUlimits := map[string]*units.Ulimit{ "nofile": { @@ -51,7 +51,7 @@ func TestGetConflictFreeConfiguration(t *testing.T) { }, } - assert.Equal(t, expectedUlimits, cc.Ulimits) + assert.Check(t, is.DeepEqual(expectedUlimits, cc.Ulimits)) } func TestDaemonConfigurationMerge(t *testing.T) { @@ -91,17 +91,17 @@ func TestDaemonConfigurationMerge(t *testing.T) { flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") cc, err := MergeDaemonConfigurations(c, flags, file.Path()) - require.NoError(t, err) + assert.NilError(t, err) - assert.True(t, cc.Debug) - assert.True(t, cc.AutoRestart) + assert.Check(t, cc.Debug) + assert.Check(t, cc.AutoRestart) expectedLogConfig := LogConfig{ Type: "syslog", Config: map[string]string{"tag": "test_tag"}, } - assert.Equal(t, expectedLogConfig, cc.LogConfig) + assert.Check(t, is.DeepEqual(expectedLogConfig, cc.LogConfig)) expectedUlimits := map[string]*units.Ulimit{ "nofile": { @@ -111,7 +111,7 @@ func TestDaemonConfigurationMerge(t *testing.T) { }, } - assert.Equal(t, expectedUlimits, cc.Ulimits) + assert.Check(t, is.DeepEqual(expectedUlimits, cc.Ulimits)) } func TestDaemonConfigurationMergeShmSize(t *testing.T) { @@ -127,8 +127,8 @@ func TestDaemonConfigurationMergeShmSize(t *testing.T) { flags.Var(&shmSize, "default-shm-size", "") cc, err := MergeDaemonConfigurations(c, flags, file.Path()) - require.NoError(t, err) + assert.NilError(t, err) expectedValue := 1 * 1024 * 1024 * 1024 - assert.Equal(t, int64(expectedValue), cc.ShmSize.Value()) + assert.Check(t, is.Equal(int64(expectedValue), cc.ShmSize.Value())) } diff --git a/vendor/github.com/docker/docker/daemon/config/config_windows_test.go b/vendor/github.com/docker/docker/daemon/config/config_windows_test.go index 5382bb3b7..fff98014f 100644 --- a/vendor/github.com/docker/docker/daemon/config/config_windows_test.go +++ 
b/vendor/github.com/docker/docker/daemon/config/config_windows_test.go @@ -7,9 +7,9 @@ import ( "testing" "github.com/docker/docker/opts" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestDaemonConfigurationMerge(t *testing.T) { @@ -46,15 +46,15 @@ func TestDaemonConfigurationMerge(t *testing.T) { flags.Var(opts.NewNamedMapOpts("log-opts", nil, nil), "log-opt", "") cc, err := MergeDaemonConfigurations(c, flags, configFile) - require.NoError(t, err) + assert.NilError(t, err) - assert.True(t, cc.Debug) - assert.True(t, cc.AutoRestart) + assert.Check(t, cc.Debug) + assert.Check(t, cc.AutoRestart) expectedLogConfig := LogConfig{ Type: "syslog", Config: map[string]string{"tag": "test_tag"}, } - assert.Equal(t, expectedLogConfig, cc.LogConfig) + assert.Check(t, is.DeepEqual(expectedLogConfig, cc.LogConfig)) } diff --git a/vendor/github.com/docker/docker/daemon/container_unix_test.go b/vendor/github.com/docker/docker/daemon/container_unix_test.go index e102be6cd..12075f8b8 100644 --- a/vendor/github.com/docker/docker/daemon/container_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/container_unix_test.go @@ -9,7 +9,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/daemon/config" "github.com/docker/go-connections/nat" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) // TestContainerWarningHostAndPublishPorts that a warning is returned when setting network mode to host and specifying published ports. 
@@ -38,7 +38,7 @@ func TestContainerWarningHostAndPublishPorts(t *testing.T) { } d := &Daemon{configStore: cs} wrns, err := d.verifyContainerSettings("", hostConfig, &containertypes.Config{}, false) - require.NoError(t, err) - require.Equal(t, tc.warnings, wrns) + assert.NilError(t, err) + assert.DeepEqual(t, tc.warnings, wrns) } } diff --git a/vendor/github.com/docker/docker/daemon/create_test.go b/vendor/github.com/docker/docker/daemon/create_test.go index 3e355f647..7ef49d762 100644 --- a/vendor/github.com/docker/docker/daemon/create_test.go +++ b/vendor/github.com/docker/docker/daemon/create_test.go @@ -5,7 +5,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/errdefs" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" ) // Test case for 35752 @@ -17,5 +17,5 @@ func TestVerifyNetworkingConfig(t *testing.T) { EndpointsConfig: endpoints, } err := verifyNetworkingConfig(nwConfig) - assert.True(t, errdefs.IsInvalidParameter(err)) + assert.Check(t, errdefs.IsInvalidParameter(err)) } diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go index ad651e3e1..195afb1e0 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_linux_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_linux_test.go @@ -11,8 +11,8 @@ import ( "github.com/docker/docker/oci" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" - - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio @@ -138,7 +138,7 @@ func TestTmpfsDevShmSizeOverride(t *testing.T) { // convert ms to spec spec := oci.DefaultSpec() err := setMounts(&d, &spec, c, ms) - assert.NoError(t, err) + assert.Check(t, err) // Check the resulting spec for the correct size 
found := false @@ -149,7 +149,7 @@ func TestTmpfsDevShmSizeOverride(t *testing.T) { continue } t.Logf("%+v\n", m.Options) - assert.Equal(t, "size="+size, o) + assert.Check(t, is.Equal("size="+size, o)) found = true } } @@ -163,7 +163,7 @@ func TestValidateContainerIsolationLinux(t *testing.T) { d := Daemon{} _, err := d.verifyContainerSettings("linux", &containertypes.HostConfig{Isolation: containertypes.IsolationHyperV}, nil, false) - assert.EqualError(t, err, "invalid isolation 'hyperv' on linux") + assert.Check(t, is.Error(err, "invalid isolation 'hyperv' on linux")) } func TestShouldUnmountRoot(t *testing.T) { @@ -222,7 +222,7 @@ func TestShouldUnmountRoot(t *testing.T) { if test.info != nil { test.info.Optional = options.Optional } - assert.Equal(t, expect, shouldUnmountRoot(test.root, test.info)) + assert.Check(t, is.Equal(expect, shouldUnmountRoot(test.root, test.info))) }) } }) diff --git a/vendor/github.com/docker/docker/daemon/daemon_test.go b/vendor/github.com/docker/docker/daemon/daemon_test.go index 5d40e1114..2fb4ff902 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_test.go @@ -19,8 +19,9 @@ import ( "github.com/docker/docker/volume/store" "github.com/docker/go-connections/nat" "github.com/docker/libnetwork" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" ) // @@ -312,7 +313,7 @@ func TestValidateContainerIsolation(t *testing.T) { d := Daemon{} _, err := d.verifyContainerSettings(runtime.GOOS, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false) - assert.EqualError(t, err, "invalid isolation 'invalid' on "+runtime.GOOS) + assert.Check(t, is.Error(err, "invalid isolation 'invalid' on "+runtime.GOOS)) } func TestFindNetworkErrorType(t *testing.T) { @@ -320,6 +321,6 @@ func TestFindNetworkErrorType(t *testing.T) { _, err := 
d.FindNetwork("fakeNet") _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork) if !errdefs.IsNotFound(err) || !ok { - assert.Fail(t, "The FindNetwork method MUST always return an error that implements the NotFound interface and is ErrNoSuchNetwork") + t.Error("The FindNetwork method MUST always return an error that implements the NotFound interface and is ErrNoSuchNetwork") } } diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix_test.go b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go index cd88b3833..84281c0b8 100644 --- a/vendor/github.com/docker/docker/daemon/daemon_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/daemon_unix_test.go @@ -17,7 +17,7 @@ import ( "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" "github.com/docker/docker/volume/store" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) type fakeContainerGetter struct { @@ -290,12 +290,12 @@ func TestMigratePre17Volumes(t *testing.T) { containerRoot := filepath.Join(rootDir, "containers") cid := "1234" err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) - require.NoError(t, err) + assert.NilError(t, err) vid := "5678" vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) err = os.MkdirAll(vfsPath, 0755) - require.NoError(t, err) + assert.NilError(t, err) config := []byte(` { diff --git a/vendor/github.com/docker/docker/daemon/delete_test.go b/vendor/github.com/docker/docker/daemon/delete_test.go index 48a8afec4..8bfa5d817 100644 --- a/vendor/github.com/docker/docker/daemon/delete_test.go +++ b/vendor/github.com/docker/docker/daemon/delete_test.go @@ -10,12 +10,12 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func newDaemonWithTmpRoot(t *testing.T) (*Daemon, func()) { tmp, err := 
ioutil.TempDir("", "docker-daemon-unix-test-") - require.NoError(t, err) + assert.NilError(t, err) d := &Daemon{ repository: tmp, root: tmp, diff --git a/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go b/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go index 4560af287..d00e02e10 100644 --- a/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go +++ b/vendor/github.com/docker/docker/daemon/discovery/discovery_test.go @@ -5,8 +5,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestDiscoveryOptsErrors(t *testing.T) { @@ -42,26 +42,26 @@ func TestDiscoveryOptsErrors(t *testing.T) { for _, testcase := range testcases { _, _, err := discoveryOpts(testcase.opts) - assert.Error(t, err, testcase.doc) + assert.Check(t, is.ErrorContains(err, ""), testcase.doc) } } func TestDiscoveryOpts(t *testing.T) { clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} heartbeat, ttl, err := discoveryOpts(clusterOpts) - require.NoError(t, err) - assert.Equal(t, 10*time.Second, heartbeat) - assert.Equal(t, 20*time.Second, ttl) + assert.NilError(t, err) + assert.Check(t, is.Equal(10*time.Second, heartbeat)) + assert.Check(t, is.Equal(20*time.Second, ttl)) clusterOpts = map[string]string{"discovery.heartbeat": "10"} heartbeat, ttl, err = discoveryOpts(clusterOpts) - require.NoError(t, err) - assert.Equal(t, 10*time.Second, heartbeat) - assert.Equal(t, 10*defaultDiscoveryTTLFactor*time.Second, ttl) + assert.NilError(t, err) + assert.Check(t, is.Equal(10*time.Second, heartbeat)) + assert.Check(t, is.Equal(10*defaultDiscoveryTTLFactor*time.Second, ttl)) clusterOpts = map[string]string{"discovery.ttl": "30"} heartbeat, ttl, err = discoveryOpts(clusterOpts) - require.NoError(t, err) + assert.NilError(t, err) if ttl != 30*time.Second { 
t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go index 89c3c8969..6a94aca41 100644 --- a/vendor/github.com/docker/docker/daemon/exec.go +++ b/vendor/github.com/docker/docker/daemon/exec.go @@ -270,7 +270,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R case <-attachErr: // TERM signal worked } - return fmt.Errorf("context cancelled") + return ctx.Err() case err := <-attachErr: if err != nil { if _, ok := err.(term.EscapeError); !ok { diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go index 1ed26c2fc..cd52f4886 100644 --- a/vendor/github.com/docker/docker/daemon/exec_linux.go +++ b/vendor/github.com/docker/docker/daemon/exec_linux.go @@ -34,6 +34,8 @@ func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config if c.AppArmorProfile != "" { appArmorProfile = c.AppArmorProfile } else if c.HostConfig.Privileged { + // `docker exec --privileged` does not currently disable AppArmor + // profiles. 
Privileged configuration of the container is inherited appArmorProfile = "unconfined" } else { appArmorProfile = "docker-default" @@ -50,6 +52,7 @@ func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config return err } } + p.ApparmorProfile = appArmorProfile } daemon.setRlimits(&specs.Spec{Process: p}, c) return nil diff --git a/vendor/github.com/docker/docker/daemon/exec_linux_test.go b/vendor/github.com/docker/docker/daemon/exec_linux_test.go new file mode 100644 index 000000000..9e5496ae4 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec_linux_test.go @@ -0,0 +1,53 @@ +// +build linux + +package daemon + +import ( + "testing" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/container" + "github.com/docker/docker/daemon/exec" + "github.com/gotestyourself/gotestyourself/assert" + "github.com/opencontainers/runc/libcontainer/apparmor" + "github.com/opencontainers/runtime-spec/specs-go" +) + +func TestExecSetPlatformOpt(t *testing.T) { + if !apparmor.IsEnabled() { + t.Skip("requires AppArmor to be enabled") + } + d := &Daemon{} + c := &container.Container{AppArmorProfile: "my-custom-profile"} + ec := &exec.Config{} + p := &specs.Process{} + + err := d.execSetPlatformOpt(c, ec, p) + assert.NilError(t, err) + assert.Equal(t, "my-custom-profile", p.ApparmorProfile) +} + +// TestExecSetPlatformOptPrivileged verifies that `docker exec --privileged` +// does not disable AppArmor profiles. Exec currently inherits the `Privileged` +// configuration of the container. See https://github.com/moby/moby/pull/31773#discussion_r105586900 +// +// This behavior may change in future, but test for the behavior to prevent it +// from being changed accidentally. 
+func TestExecSetPlatformOptPrivileged(t *testing.T) { + if !apparmor.IsEnabled() { + t.Skip("requires AppArmor to be enabled") + } + d := &Daemon{} + c := &container.Container{AppArmorProfile: "my-custom-profile"} + ec := &exec.Config{Privileged: true} + p := &specs.Process{} + + err := d.execSetPlatformOpt(c, ec, p) + assert.NilError(t, err) + assert.Equal(t, "my-custom-profile", p.ApparmorProfile) + + c.HostConfig = &containertypes.HostConfig{Privileged: true} + err = d.execSetPlatformOpt(c, ec, p) + assert.NilError(t, err) + assert.Equal(t, "unconfined", p.ApparmorProfile) +} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go index 52c23a3c2..737e161ed 100644 --- a/vendor/github.com/docker/docker/daemon/export.go +++ b/vendor/github.com/docker/docker/daemon/export.go @@ -61,12 +61,12 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R } }() - _, err = rwlayer.Mount(container.GetMountLabel()) + basefs, err := rwlayer.Mount(container.GetMountLabel()) if err != nil { return nil, err } - archive, err := archivePath(container.BaseFS, container.BaseFS.Path(), &archive.TarOptions{ + archive, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: daemon.idMappings.UIDs(), GIDMaps: daemon.idMappings.GIDs(), diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go index d0641abe3..2338ad320 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs_test.go @@ -17,8 +17,8 @@ import ( "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/stringid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" ) var ( @@ -189,7 +189,7 @@ func TestCleanupWithNoDirs(t *testing.T) { defer os.RemoveAll(tmp) err := d.Cleanup() - assert.NoError(t, err) + assert.Check(t, err) } func TestCleanupWithDir(t *testing.T) { @@ -210,11 +210,11 @@ func TestMountedFalseResponse(t *testing.T) { defer os.RemoveAll(tmp) err := d.Create("1", "", nil) - require.NoError(t, err) + assert.NilError(t, err) response, err := d.mounted(d.getDiffPath("1")) - require.NoError(t, err) - assert.False(t, response) + assert.NilError(t, err) + assert.Check(t, !response) } func TestMountedTrueResponse(t *testing.T) { @@ -223,16 +223,16 @@ func TestMountedTrueResponse(t *testing.T) { defer d.Cleanup() err := d.Create("1", "", nil) - require.NoError(t, err) + assert.NilError(t, err) err = d.Create("2", "1", nil) - require.NoError(t, err) + assert.NilError(t, err) _, err = d.Get("2", "") - require.NoError(t, err) + assert.NilError(t, err) response, err := d.mounted(d.pathCache["2"]) - require.NoError(t, err) - assert.True(t, response) + assert.NilError(t, err) + assert.Check(t, response) } func TestMountWithParent(t *testing.T) { @@ -567,7 +567,7 @@ func TestStatus(t *testing.T) { } status := d.Status() - assert.Len(t, status, 4) + assert.Check(t, is.Len(status, 4)) rootDir := status[0] dirs := status[2] @@ -670,18 +670,18 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { current = hash(current) err := d.CreateReadWrite(current, parent, nil) - require.NoError(t, err, "current layer %d", i) + assert.NilError(t, err, "current layer %d", i) point, err := driverGet(d, current, "") - require.NoError(t, err, "current layer %d", i) + assert.NilError(t, err, "current layer %d", i) f, err := os.Create(path.Join(point, current)) - require.NoError(t, err, "current layer %d", i) + assert.NilError(t, err, "current layer %d", i) f.Close() if i%10 == 0 { err := os.Remove(path.Join(point, parent)) - require.NoError(t, err, "current layer %d", i) + 
assert.NilError(t, err, "current layer %d", i) expected-- } last = current @@ -689,10 +689,10 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { // Perform the actual mount for the top most image point, err := driverGet(d, last, "") - require.NoError(t, err) + assert.NilError(t, err) files, err := ioutil.ReadDir(point) - require.NoError(t, err) - assert.Len(t, files, expected) + assert.NilError(t, err) + assert.Check(t, is.Len(files, expected)) } func TestMountMoreThan42Layers(t *testing.T) { diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go index 6d4387c94..a09bb2637 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy_test.go @@ -14,8 +14,8 @@ import ( "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/system" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/sys/unix" ) @@ -24,16 +24,16 @@ func TestIsCopyFileRangeSyscallAvailable(t *testing.T) { // 1. That copyFileRangeEnabled is being set to true when copy_file_range syscall is available // 2. 
That isCopyFileRangeSyscallAvailable() works on "new" kernels v, err := kernel.GetKernelVersion() - require.NoError(t, err) + assert.NilError(t, err) copyWithFileRange := true copyWithFileClone := false doCopyTest(t, ©WithFileRange, ©WithFileClone) if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 5, Minor: 0}) < 0 { - assert.False(t, copyWithFileRange) + assert.Check(t, !copyWithFileRange) } else { - assert.True(t, copyWithFileRange) + assert.Check(t, copyWithFileRange) } } @@ -52,47 +52,47 @@ func TestCopyWithoutRange(t *testing.T) { func TestCopyDir(t *testing.T) { srcDir, err := ioutil.TempDir("", "srcDir") - require.NoError(t, err) + assert.NilError(t, err) populateSrcDir(t, srcDir, 3) dstDir, err := ioutil.TempDir("", "testdst") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dstDir) - assert.NoError(t, DirCopy(srcDir, dstDir, Content, false)) - require.NoError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { + assert.Check(t, DirCopy(srcDir, dstDir, Content, false)) + assert.NilError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { if err != nil { return err } // Rebase path relPath, err := filepath.Rel(srcDir, srcPath) - require.NoError(t, err) + assert.NilError(t, err) if relPath == "." { return nil } dstPath := filepath.Join(dstDir, relPath) - require.NoError(t, err) + assert.NilError(t, err) // If we add non-regular dirs and files to the test // then we need to add more checks here. 
dstFileInfo, err := os.Lstat(dstPath) - require.NoError(t, err) + assert.NilError(t, err) srcFileSys := f.Sys().(*syscall.Stat_t) dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t) t.Log(relPath) if srcFileSys.Dev == dstFileSys.Dev { - assert.NotEqual(t, srcFileSys.Ino, dstFileSys.Ino) + assert.Check(t, srcFileSys.Ino != dstFileSys.Ino) } // Todo: check size, and ctim is not equal /// on filesystems that have granular ctimes - assert.Equal(t, srcFileSys.Mode, dstFileSys.Mode) - assert.Equal(t, srcFileSys.Uid, dstFileSys.Uid) - assert.Equal(t, srcFileSys.Gid, dstFileSys.Gid) - assert.Equal(t, srcFileSys.Mtim, dstFileSys.Mtim) + assert.Check(t, is.DeepEqual(srcFileSys.Mode, dstFileSys.Mode)) + assert.Check(t, is.DeepEqual(srcFileSys.Uid, dstFileSys.Uid)) + assert.Check(t, is.DeepEqual(srcFileSys.Gid, dstFileSys.Gid)) + assert.Check(t, is.DeepEqual(srcFileSys.Mtim, dstFileSys.Mtim)) return nil })) @@ -115,22 +115,22 @@ func populateSrcDir(t *testing.T, srcDir string, remainingDepth int) { for i := 0; i < 10; i++ { dirName := filepath.Join(srcDir, fmt.Sprintf("srcdir-%d", i)) // Owner all bits set - require.NoError(t, os.Mkdir(dirName, randomMode(0700))) + assert.NilError(t, os.Mkdir(dirName, randomMode(0700))) populateSrcDir(t, dirName, remainingDepth-1) - require.NoError(t, system.Chtimes(dirName, aTime, mTime)) + assert.NilError(t, system.Chtimes(dirName, aTime, mTime)) } for i := 0; i < 10; i++ { fileName := filepath.Join(srcDir, fmt.Sprintf("srcfile-%d", i)) // Owner read bit set - require.NoError(t, ioutil.WriteFile(fileName, []byte{}, randomMode(0400))) - require.NoError(t, system.Chtimes(fileName, aTime, mTime)) + assert.NilError(t, ioutil.WriteFile(fileName, []byte{}, randomMode(0400))) + assert.NilError(t, system.Chtimes(fileName, aTime, mTime)) } } func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) { dir, err := ioutil.TempDir("", "docker-copy-check") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dir) 
srcFilename := filepath.Join(dir, "srcFilename") dstFilename := filepath.Join(dir, "dstilename") @@ -138,42 +138,42 @@ func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) { r := rand.New(rand.NewSource(0)) buf := make([]byte, 1024) _, err = r.Read(buf) - require.NoError(t, err) - require.NoError(t, ioutil.WriteFile(srcFilename, buf, 0777)) + assert.NilError(t, err) + assert.NilError(t, ioutil.WriteFile(srcFilename, buf, 0777)) fileinfo, err := os.Stat(srcFilename) - require.NoError(t, err) + assert.NilError(t, err) - require.NoError(t, copyRegular(srcFilename, dstFilename, fileinfo, copyWithFileRange, copyWithFileClone)) + assert.NilError(t, copyRegular(srcFilename, dstFilename, fileinfo, copyWithFileRange, copyWithFileClone)) readBuf, err := ioutil.ReadFile(dstFilename) - require.NoError(t, err) - assert.Equal(t, buf, readBuf) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(buf, readBuf)) } func TestCopyHardlink(t *testing.T) { var srcFile1FileInfo, srcFile2FileInfo, dstFile1FileInfo, dstFile2FileInfo unix.Stat_t srcDir, err := ioutil.TempDir("", "srcDir") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(srcDir) dstDir, err := ioutil.TempDir("", "dstDir") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dstDir) srcFile1 := filepath.Join(srcDir, "file1") srcFile2 := filepath.Join(srcDir, "file2") dstFile1 := filepath.Join(dstDir, "file1") dstFile2 := filepath.Join(dstDir, "file2") - require.NoError(t, ioutil.WriteFile(srcFile1, []byte{}, 0777)) - require.NoError(t, os.Link(srcFile1, srcFile2)) + assert.NilError(t, ioutil.WriteFile(srcFile1, []byte{}, 0777)) + assert.NilError(t, os.Link(srcFile1, srcFile2)) - assert.NoError(t, DirCopy(srcDir, dstDir, Content, false)) + assert.Check(t, DirCopy(srcDir, dstDir, Content, false)) - require.NoError(t, unix.Stat(srcFile1, &srcFile1FileInfo)) - require.NoError(t, unix.Stat(srcFile2, &srcFile2FileInfo)) - require.Equal(t, srcFile1FileInfo.Ino, 
srcFile2FileInfo.Ino) + assert.NilError(t, unix.Stat(srcFile1, &srcFile1FileInfo)) + assert.NilError(t, unix.Stat(srcFile2, &srcFile2FileInfo)) + assert.Equal(t, srcFile1FileInfo.Ino, srcFile2FileInfo.Ino) - require.NoError(t, unix.Stat(dstFile1, &dstFile1FileInfo)) - require.NoError(t, unix.Stat(dstFile2, &dstFile2FileInfo)) - assert.Equal(t, dstFile1FileInfo.Ino, dstFile2FileInfo.Ino) + assert.NilError(t, unix.Stat(dstFile1, &dstFile1FileInfo)) + assert.NilError(t, unix.Stat(dstFile2, &dstFile2FileInfo)) + assert.Check(t, is.Equal(dstFile1FileInfo.Ino, dstFile2FileInfo.Ino)) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go index 777bac7af..4a29465f1 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_test.go @@ -6,32 +6,31 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestIsEmptyDir(t *testing.T) { tmp, err := ioutil.TempDir("", "test-is-empty-dir") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmp) d := filepath.Join(tmp, "empty-dir") err = os.Mkdir(d, 0755) - require.NoError(t, err) + assert.NilError(t, err) empty := isEmptyDir(d) - assert.True(t, empty) + assert.Check(t, empty) d = filepath.Join(tmp, "dir-with-subdir") err = os.MkdirAll(filepath.Join(d, "subdir"), 0755) - require.NoError(t, err) + assert.NilError(t, err) empty = isEmptyDir(d) - assert.False(t, empty) + assert.Check(t, !empty) d = filepath.Join(tmp, "dir-with-empty-file") err = os.Mkdir(d, 0755) - require.NoError(t, err) + assert.NilError(t, err) _, err = ioutil.TempFile(d, "file") - require.NoError(t, err) + assert.NilError(t, err) empty = isEmptyDir(d) - assert.False(t, empty) + assert.Check(t, !empty) } diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go index 2eb4184c0..1b221dabe 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go @@ -9,7 +9,7 @@ import ( contdriver "github.com/containerd/continuity/driver" "github.com/docker/docker/pkg/stringid" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) // DriverBenchExists benchmarks calls to exist @@ -251,7 +251,7 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d } b.StopTimer() - require.Equal(b, content, c) + assert.DeepEqual(b, content, c) b.StartTimer() } } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go index a7bdd8cda..1e068535f 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go @@ -15,9 +15,9 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/quota" "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-units" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + units "github.com/docker/go-units" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/sys/unix" ) @@ -36,9 +36,9 @@ type Driver struct { func newDriver(t testing.TB, name string, options []string) *Driver { root, err := ioutil.TempDir("", "docker-graphtest-") - require.NoError(t, err) + assert.NilError(t, err) - require.NoError(t, os.MkdirAll(root, 0755)) + assert.NilError(t, os.MkdirAll(root, 0755)) d, err := graphdriver.GetDriver(name, 
nil, graphdriver.Options{DriverOptions: options, Root: root}) if err != nil { t.Logf("graphdriver: %v\n", err) @@ -85,10 +85,10 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str defer PutDriver(t) err := driver.Create("empty", "", nil) - require.NoError(t, err) + assert.NilError(t, err) defer func() { - require.NoError(t, driver.Remove("empty")) + assert.NilError(t, driver.Remove("empty")) }() if !driver.Exists("empty") { @@ -96,14 +96,14 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str } dir, err := driver.Get("empty", "") - require.NoError(t, err) + assert.NilError(t, err) verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0) // Verify that the directory is empty fis, err := readDir(dir, dir.Path()) - require.NoError(t, err) - assert.Len(t, fis, 0) + assert.NilError(t, err) + assert.Check(t, is.Len(fis, 0)) driver.Put("empty") } @@ -115,7 +115,7 @@ func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...stri createBase(t, driver, "Base") defer func() { - require.NoError(t, driver.Remove("Base")) + assert.NilError(t, driver.Remove("Base")) }() verifyBase(t, driver, "Base") } @@ -127,13 +127,13 @@ func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...stri createBase(t, driver, "Base") defer func() { - require.NoError(t, driver.Remove("Base")) + assert.NilError(t, driver.Remove("Base")) }() err := driver.Create("Snap", "Base", nil) - require.NoError(t, err) + assert.NilError(t, err) defer func() { - require.NoError(t, driver.Remove("Snap")) + assert.NilError(t, driver.Remove("Snap")) }() verifyBase(t, driver, "Snap") diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go index 4659bf2a1..3103df150 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go +++ 
b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go @@ -9,25 +9,25 @@ import ( contdriver "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/sys/unix" ) func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { fi, err := os.Stat(path) - require.NoError(t, err) + assert.NilError(t, err) actual := fi.Mode() - assert.Equal(t, mode&os.ModeType, actual&os.ModeType, path) - assert.Equal(t, mode&os.ModePerm, actual&os.ModePerm, path) - assert.Equal(t, mode&os.ModeSticky, actual&os.ModeSticky, path) - assert.Equal(t, mode&os.ModeSetuid, actual&os.ModeSetuid, path) - assert.Equal(t, mode&os.ModeSetgid, actual&os.ModeSetgid, path) + assert.Check(t, is.Equal(mode&os.ModeType, actual&os.ModeType), path) + assert.Check(t, is.Equal(mode&os.ModePerm, actual&os.ModePerm), path) + assert.Check(t, is.Equal(mode&os.ModeSticky, actual&os.ModeSticky), path) + assert.Check(t, is.Equal(mode&os.ModeSetuid, actual&os.ModeSetuid), path) + assert.Check(t, is.Equal(mode&os.ModeSetgid, actual&os.ModeSetgid), path) if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - assert.Equal(t, uid, stat.Uid, path) - assert.Equal(t, gid, stat.Gid, path) + assert.Check(t, is.Equal(uid, stat.Uid), path) + assert.Check(t, is.Equal(gid, stat.Gid), path) } } @@ -37,24 +37,24 @@ func createBase(t testing.TB, driver graphdriver.Driver, name string) { defer unix.Umask(oldmask) err := driver.CreateReadWrite(name, "", nil) - require.NoError(t, err) + assert.NilError(t, err) dirFS, err := driver.Get(name, "") - require.NoError(t, err) + assert.NilError(t, err) defer driver.Put(name) subdir := dirFS.Join(dirFS.Path(), "a subdir") - require.NoError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky)) - require.NoError(t, dirFS.Lchown(subdir, 1, 
2)) + assert.NilError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky)) + assert.NilError(t, dirFS.Lchown(subdir, 1, 2)) file := dirFS.Join(dirFS.Path(), "a file") err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid) - require.NoError(t, err) + assert.NilError(t, err) } func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { dirFS, err := driver.Get(name, "") - require.NoError(t, err) + assert.NilError(t, err) defer driver.Put(name) subdir := dirFS.Join(dirFS.Path(), "a subdir") @@ -64,6 +64,6 @@ func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) files, err := readDir(dirFS, dirFS.Path()) - require.NoError(t, err) - assert.Len(t, files, 2) + assert.NilError(t, err) + assert.Check(t, is.Len(files, 2)) } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go index 939044fdc..2f1bf593d 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota_test.go @@ -10,9 +10,9 @@ import ( "path/filepath" "testing" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/sys/unix" ) @@ -80,14 +80,14 @@ func wrapMountTest(imageFileName string, enableQuota bool, testFunc func(t *test } } - require.NoError(t, err, "mount failed: %s", out) + assert.NilError(t, err, "mount failed: %s", out) defer func() { - require.NoError(t, unix.Unmount(mountPoint, 0)) + assert.NilError(t, unix.Unmount(mountPoint, 0)) }() backingFsDev, err := makeBackingFsDev(mountPoint) - require.NoError(t, err) + assert.NilError(t, err) testFunc(t, mountPoint, backingFsDev) } @@ -95,58 +95,58 @@ func 
wrapMountTest(imageFileName string, enableQuota bool, testFunc func(t *test func testBlockDevQuotaDisabled(t *testing.T, mountPoint, backingFsDev string) { hasSupport, err := hasQuotaSupport(backingFsDev) - require.NoError(t, err) - assert.False(t, hasSupport) + assert.NilError(t, err) + assert.Check(t, !hasSupport) } func testBlockDevQuotaEnabled(t *testing.T, mountPoint, backingFsDev string) { hasSupport, err := hasQuotaSupport(backingFsDev) - require.NoError(t, err) - assert.True(t, hasSupport) + assert.NilError(t, err) + assert.Check(t, hasSupport) } func wrapQuotaTest(testFunc func(t *testing.T, ctrl *Control, mountPoint, testDir, testSubDir string)) func(t *testing.T, mountPoint, backingFsDev string) { return func(t *testing.T, mountPoint, backingFsDev string) { testDir, err := ioutil.TempDir(mountPoint, "per-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(testDir) ctrl, err := NewControl(testDir) - require.NoError(t, err) + assert.NilError(t, err) testSubDir, err := ioutil.TempDir(testDir, "quota-test") - require.NoError(t, err) + assert.NilError(t, err) testFunc(t, ctrl, mountPoint, testDir, testSubDir) } } func testSmallerThanQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) { - require.NoError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) + assert.NilError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) smallerThanQuotaFile := filepath.Join(testSubDir, "smaller-than-quota") - require.NoError(t, ioutil.WriteFile(smallerThanQuotaFile, make([]byte, testQuotaSize/2), 0644)) - require.NoError(t, os.Remove(smallerThanQuotaFile)) + assert.NilError(t, ioutil.WriteFile(smallerThanQuotaFile, make([]byte, testQuotaSize/2), 0644)) + assert.NilError(t, os.Remove(smallerThanQuotaFile)) } func testBiggerThanQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) { // Make sure the quota is being enforced // TODO: When we implement this under EXT4, we need to shed CAP_SYS_RESOURCE, otherwise // 
we're able to violate quota without issue - require.NoError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) + assert.NilError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) biggerThanQuotaFile := filepath.Join(testSubDir, "bigger-than-quota") err := ioutil.WriteFile(biggerThanQuotaFile, make([]byte, testQuotaSize+1), 0644) - require.Error(t, err) + assert.Assert(t, is.ErrorContains(err, "")) if err == io.ErrShortWrite { - require.NoError(t, os.Remove(biggerThanQuotaFile)) + assert.NilError(t, os.Remove(biggerThanQuotaFile)) } } func testRetrieveQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) { // Validate that we can retrieve quota - require.NoError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) + assert.NilError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize})) var q Quota - require.NoError(t, ctrl.GetQuota(testSubDir, &q)) - assert.EqualValues(t, testQuotaSize, q.Size) + assert.NilError(t, ctrl.GetQuota(testSubDir, &q)) + assert.Check(t, is.Equal(uint64(testQuotaSize), q.Size)) } diff --git a/vendor/github.com/docker/docker/daemon/images/image_history.go b/vendor/github.com/docker/docker/daemon/images/image_history.go index 2b9229263..b4ca25b1b 100644 --- a/vendor/github.com/docker/docker/daemon/images/image_history.go +++ b/vendor/github.com/docker/docker/daemon/images/image_history.go @@ -7,6 +7,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" ) // ImageHistory returns a slice of ImageHistory structures for the specified image @@ -31,7 +32,9 @@ func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, if len(img.RootFS.DiffIDs) <= layerCounter { return nil, fmt.Errorf("too many non-empty layers in History section") } - + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, system.ErrNotSupportedOperatingSystem + } rootFS.Append(img.RootFS.DiffIDs[layerCounter]) l, err 
:= i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID()) if err != nil { diff --git a/vendor/github.com/docker/docker/daemon/images/images.go b/vendor/github.com/docker/docker/daemon/images/images.go index 46056f15b..49212341c 100644 --- a/vendor/github.com/docker/docker/daemon/images/images.go +++ b/vendor/github.com/docker/docker/daemon/images/images.go @@ -271,7 +271,9 @@ func (i *ImageService) SquashImage(id, parent string) (string, error) { rootFS := image.NewRootFS() parentImg = &image.Image{RootFS: rootFS} } - + if !system.IsOSSupported(img.OperatingSystem()) { + return "", errors.Wrap(err, system.ErrNotSupportedOperatingSystem.Error()) + } l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) if err != nil { return "", errors.Wrap(err, "error getting image layer") diff --git a/vendor/github.com/docker/docker/daemon/images/service.go b/vendor/github.com/docker/docker/daemon/images/service.go index 70a8bf445..4af48959b 100644 --- a/vendor/github.com/docker/docker/daemon/images/service.go +++ b/vendor/github.com/docker/docker/daemon/images/service.go @@ -77,7 +77,7 @@ type ImageService struct { // CountImages returns the number of images stored by ImageService // called from info.go func (i *ImageService) CountImages() int { - return len(i.imageStore.Map()) + return i.imageStore.Len() } // Children returns the children image.IDs for a parent image. 
diff --git a/vendor/github.com/docker/docker/daemon/info_unix_test.go b/vendor/github.com/docker/docker/daemon/info_unix_test.go index 92a336162..7ff100932 100644 --- a/vendor/github.com/docker/docker/daemon/info_unix_test.go +++ b/vendor/github.com/docker/docker/daemon/info_unix_test.go @@ -7,7 +7,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/dockerversion" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestParseInitVersion(t *testing.T) { @@ -43,10 +44,10 @@ func TestParseInitVersion(t *testing.T) { for _, test := range tests { ver, err := parseInitVersion(string(test.version)) if test.invalid { - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) } else { - assert.NoError(t, err) + assert.Check(t, err) } - assert.Equal(t, test.result, ver) + assert.Check(t, is.DeepEqual(test.result, ver)) } } diff --git a/vendor/github.com/docker/docker/daemon/inspect_test.go b/vendor/github.com/docker/docker/daemon/inspect_test.go index c10cc5679..d1ad5b0e0 100644 --- a/vendor/github.com/docker/docker/daemon/inspect_test.go +++ b/vendor/github.com/docker/docker/daemon/inspect_test.go @@ -7,8 +7,8 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/exec" - - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestGetInspectData(t *testing.T) { @@ -25,9 +25,9 @@ func TestGetInspectData(t *testing.T) { } _, err := d.getInspectData(c) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) c.Dead = true _, err = d.getInspectData(c) - assert.NoError(t, err) + assert.Check(t, err) } diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter_test.go b/vendor/github.com/docker/docker/daemon/logger/adapter_test.go index 25abab571..94d14eaef 
100644 --- a/vendor/github.com/docker/docker/daemon/logger/adapter_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/adapter_test.go @@ -10,7 +10,8 @@ import ( "github.com/docker/docker/api/types/plugins/logdriver" protoio "github.com/gogo/protobuf/io" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) // mockLoggingPlugin implements the loggingPlugin interface for testing purposes @@ -88,7 +89,7 @@ func (l *mockLoggingPlugin) ReadLogs(info Info, config ReadConfig) (io.ReadClose func newMockPluginAdapter(t *testing.T) Logger { r, w := io.Pipe() f, err := ioutil.TempFile("", "mock-plugin-adapter") - assert.NoError(t, err) + assert.Check(t, err) enc := logdriver.NewLogEntryEncoder(w) a := &pluginAdapterWithRead{ @@ -116,11 +117,11 @@ func TestAdapterReadLogs(t *testing.T) { } for _, msg := range testMsg { m := msg.copy() - assert.NoError(t, l.Log(m)) + assert.Check(t, l.Log(m)) } lr, ok := l.(LogReader) - assert.NotNil(t, ok) + assert.Check(t, ok, "Logger does not implement LogReader") lw := lr.ReadLogs(ReadConfig{}) @@ -135,7 +136,7 @@ func TestAdapterReadLogs(t *testing.T) { select { case _, ok := <-lw.Msg: - assert.False(t, ok, "expected message channel to be closed") + assert.Check(t, !ok, "expected message channel to be closed") case <-time.After(10 * time.Second): t.Fatal("timeout waiting for message channel to close") @@ -153,11 +154,11 @@ func TestAdapterReadLogs(t *testing.T) { } x := Message{Line: []byte("Too infinity and beyond!"), Timestamp: time.Now()} - assert.NoError(t, l.Log(x.copy())) + assert.Check(t, l.Log(x.copy())) select { case msg, ok := <-lw.Msg: - assert.NotNil(t, ok, "message channel unexpectedly closed") + assert.Check(t, ok, "message channel unexpectedly closed") testMessageEqual(t, &x, msg) case <-time.After(10 * time.Second): t.Fatal("timeout reading logs") @@ -166,15 +167,15 @@ func TestAdapterReadLogs(t *testing.T) { l.Close() 
select { case msg, ok := <-lw.Msg: - assert.False(t, ok, "expected message channel to be closed") - assert.Nil(t, msg) + assert.Check(t, !ok, "expected message channel to be closed") + assert.Check(t, is.Nil(msg)) case <-time.After(10 * time.Second): t.Fatal("timeout waiting for logger to close") } } func testMessageEqual(t *testing.T, a, b *Message) { - assert.Equal(t, a.Line, b.Line) - assert.Equal(t, a.Timestamp.UnixNano(), b.Timestamp.UnixNano()) - assert.Equal(t, a.Source, b.Source) + assert.Check(t, is.DeepEqual(a.Line, b.Line)) + assert.Check(t, is.DeepEqual(a.Timestamp.UnixNano(), b.Timestamp.UnixNano())) + assert.Check(t, is.Equal(a.Source, b.Source)) } diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go index 835379b3b..d6312b660 100644 --- a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go @@ -69,6 +69,8 @@ type logStream struct { sequenceToken *string } +var _ logger.SizedLogger = &logStream{} + type api interface { CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go index 080157b2e..4a37d9816 100644 --- a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs_test.go @@ -21,7 +21,8 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/dockerversion" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" ) const ( @@ -544,17 +545,17 @@ func TestCollectBatchMultilinePattern(t *testing.T) { // Verify single multiline event argument := <-mockClient.putLogEventsArgument - assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") - assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") - assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n"+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") stream.Close() // Verify single event argument = <-mockClient.putLogEventsArgument - assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") - assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") - assert.Equal(t, "xxxx "+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal("xxxx "+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") } func BenchmarkCollectBatch(b *testing.B) { @@ -657,9 +658,9 @@ func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) { // Verify single multiline event is flushed after maximum event buffer age (batchPublishFrequency) argument := <-mockClient.putLogEventsArgument - assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") - assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") - assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + assert.Check(t, argument != nil, "Expected non-nil 
PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n"+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") // Log an event 1 second later stream.Log(&logger.Message{ @@ -672,9 +673,9 @@ func TestCollectBatchMultilinePatternMaxEventAge(t *testing.T) { // Verify the event buffer is truly flushed - we should only receive a single event argument = <-mockClient.putLogEventsArgument - assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") - assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") - assert.Equal(t, logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") stream.Close() } @@ -719,9 +720,9 @@ func TestCollectBatchMultilinePatternNegativeEventAge(t *testing.T) { // Verify single multiline event is flushed with a negative event buffer age argument := <-mockClient.putLogEventsArgument - assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") - assert.Equal(t, 1, len(argument.LogEvents), "Expected single multiline event") - assert.Equal(t, logline+"\n"+logline+"\n", *argument.LogEvents[0].Message, "Received incorrect multiline message") + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(1, len(argument.LogEvents)), "Expected single multiline event") + assert.Check(t, is.Equal(logline+"\n"+logline+"\n", *argument.LogEvents[0].Message), "Received incorrect multiline message") stream.Close() } @@ -772,10 +773,10 @@ func TestCollectBatchMultilinePatternMaxEventSize(t *testing.T) { // We expect a maximum sized event with no new line characters and 
a // second short event with a new line character at the end argument := <-mockClient.putLogEventsArgument - assert.NotNil(t, argument, "Expected non-nil PutLogEventsInput") - assert.Equal(t, 2, len(argument.LogEvents), "Expected two events") - assert.Equal(t, longline, *argument.LogEvents[0].Message, "Received incorrect multiline message") - assert.Equal(t, shortline+"\n", *argument.LogEvents[1].Message, "Received incorrect multiline message") + assert.Check(t, argument != nil, "Expected non-nil PutLogEventsInput") + assert.Check(t, is.Equal(2, len(argument.LogEvents)), "Expected two events") + assert.Check(t, is.Equal(longline, *argument.LogEvents[0].Message), "Received incorrect multiline message") + assert.Check(t, is.Equal(shortline+"\n", *argument.LogEvents[1].Message), "Received incorrect multiline message") stream.Close() } @@ -1069,8 +1070,8 @@ func TestParseLogOptionsMultilinePattern(t *testing.T) { } multilinePattern, err := parseMultilineOptions(info) - assert.Nil(t, err, "Received unexpected error") - assert.True(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") + assert.Check(t, err, "Received unexpected error") + assert.Check(t, multilinePattern.MatchString("xxxx"), "No multiline pattern match found") } func TestParseLogOptionsDatetimeFormat(t *testing.T) { @@ -1094,8 +1095,8 @@ func TestParseLogOptionsDatetimeFormat(t *testing.T) { }, } multilinePattern, err := parseMultilineOptions(info) - assert.Nil(t, err, "Received unexpected error") - assert.True(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") + assert.Check(t, err, "Received unexpected error") + assert.Check(t, multilinePattern.MatchString(dt.match), "No multiline pattern match found") }) } } @@ -1109,8 +1110,8 @@ func TestValidateLogOptionsDatetimeFormatAndMultilinePattern(t *testing.T) { conflictingLogOptionsError := "you cannot configure log opt 'awslogs-datetime-format' and 'awslogs-multiline-pattern' at the same time" err := 
ValidateLogOpt(cfg) - assert.NotNil(t, err, "Expected an error") - assert.Equal(t, err.Error(), conflictingLogOptionsError, "Received invalid error") + assert.Check(t, err != nil, "Expected an error") + assert.Check(t, is.Equal(err.Error(), conflictingLogOptionsError), "Received invalid error") } func TestCreateTagSuccess(t *testing.T) { @@ -1143,11 +1144,6 @@ func TestCreateTagSuccess(t *testing.T) { } } -func TestIsSizedLogger(t *testing.T) { - awslogs := &logStream{} - assert.Implements(t, (*logger.SizedLogger)(nil), awslogs, "awslogs should implement SizedLogger") -} - func BenchmarkUnwrapEvents(b *testing.B) { events := make([]wrappedEvent, maximumLogEventsPerPut) for i := 0; i < maximumLogEventsPerPut; i++ { @@ -1157,11 +1153,10 @@ func BenchmarkUnwrapEvents(b *testing.B) { } } - as := assert.New(b) b.ResetTimer() for i := 0; i < b.N; i++ { res := unwrapEvents(events) - as.Len(res, maximumLogEventsPerPut) + assert.Check(b, is.Len(res, maximumLogEventsPerPut)) } } @@ -1194,15 +1189,15 @@ func TestNewAWSLogsClientCredentialEndpointDetect(t *testing.T) { info.Config["awslogs-credentials-endpoint"] = "/creds" c, err := newAWSLogsClient(info) - assert.NoError(t, err) + assert.Check(t, err) client := c.(*cloudwatchlogs.CloudWatchLogs) creds, err := client.Config.Credentials.Get() - assert.NoError(t, err) + assert.Check(t, err) - assert.Equal(t, expectedAccessKeyID, creds.AccessKeyID) - assert.Equal(t, expectedSecretAccessKey, creds.SecretAccessKey) + assert.Check(t, is.Equal(expectedAccessKeyID, creds.AccessKeyID)) + assert.Check(t, is.Equal(expectedSecretAccessKey, creds.SecretAccessKey)) } func TestNewAWSLogsClientCredentialEnvironmentVariable(t *testing.T) { @@ -1224,15 +1219,15 @@ func TestNewAWSLogsClientCredentialEnvironmentVariable(t *testing.T) { } c, err := newAWSLogsClient(info) - assert.NoError(t, err) + assert.Check(t, err) client := c.(*cloudwatchlogs.CloudWatchLogs) creds, err := client.Config.Credentials.Get() - assert.NoError(t, err) + 
assert.Check(t, err) - assert.Equal(t, expectedAccessKeyID, creds.AccessKeyID) - assert.Equal(t, expectedSecretAccessKey, creds.SecretAccessKey) + assert.Check(t, is.Equal(expectedAccessKeyID, creds.AccessKeyID)) + assert.Check(t, is.Equal(expectedSecretAccessKey, creds.SecretAccessKey)) } @@ -1253,13 +1248,13 @@ func TestNewAWSLogsClientCredentialSharedFile(t *testing.T) { tmpfile, err := ioutil.TempFile("", "example") defer os.Remove(tmpfile.Name()) // clean up - assert.NoError(t, err) + assert.Check(t, err) _, err = tmpfile.Write(content) - assert.NoError(t, err) + assert.Check(t, err) err = tmpfile.Close() - assert.NoError(t, err) + assert.Check(t, err) os.Unsetenv("AWS_ACCESS_KEY_ID") os.Unsetenv("AWS_SECRET_ACCESS_KEY") @@ -1272,13 +1267,13 @@ func TestNewAWSLogsClientCredentialSharedFile(t *testing.T) { } c, err := newAWSLogsClient(info) - assert.NoError(t, err) + assert.Check(t, err) client := c.(*cloudwatchlogs.CloudWatchLogs) creds, err := client.Config.Credentials.Get() - assert.NoError(t, err) + assert.Check(t, err) - assert.Equal(t, expectedAccessKeyID, creds.AccessKeyID) - assert.Equal(t, expectedSecretAccessKey, creds.SecretAccessKey) + assert.Check(t, is.Equal(expectedAccessKeyID, creds.AccessKeyID)) + assert.Check(t, is.Equal(expectedSecretAccessKey, creds.SecretAccessKey)) } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go index 5a8045088..2b1e91d06 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -49,6 +49,9 @@ func New(info logger.Info) (logger.Logger, error) { if err != nil { return nil, err } + if capval <= 0 { + return nil, fmt.Errorf("max-size should be a positive numbler") + } } var maxFiles = 1 if maxFileString, ok := info.Config["max-file"]; ok { @@ -62,6 +65,18 @@ func New(info logger.Info) (logger.Logger, 
error) { } } + var compress bool + if compressString, ok := info.Config["compress"]; ok { + var err error + compress, err = strconv.ParseBool(compressString) + if err != nil { + return nil, err + } + if compress && (maxFiles == 1 || capval == -1) { + return nil, fmt.Errorf("compress cannot be true when max-file is less than 2 or max-size is not set") + } + } + attrs, err := info.ExtraAttributes(nil) if err != nil { return nil, err @@ -95,7 +110,7 @@ func New(info logger.Info) (logger.Logger, error) { return b, nil } - writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, marshalFunc, decodeFunc, 0640) + writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, marshalFunc, decodeFunc, 0640) if err != nil { return nil, err } @@ -139,6 +154,7 @@ func ValidateLogOpt(cfg map[string]string) error { switch key { case "max-file": case "max-size": + case "compress": case "labels": case "env": case "env-regex": diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go index 2f74e2609..2becd694b 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog_test.go @@ -2,6 +2,7 @@ package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelo import ( "bytes" + "compress/gzip" "encoding/json" "io/ioutil" "os" @@ -13,9 +14,9 @@ import ( "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestJSONFileLogger(t *testing.T) { @@ -63,7 +64,7 @@ func TestJSONFileLoggerWithTags(t *testing.T) { cname := "test-container" tmp, err 
:= ioutil.TempDir("", "docker-logger-") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") @@ -76,26 +77,26 @@ func TestJSONFileLoggerWithTags(t *testing.T) { LogPath: filename, }) - require.NoError(t, err) + assert.NilError(t, err) defer l.Close() err = l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}) - require.NoError(t, err) + assert.NilError(t, err) err = l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}) - require.NoError(t, err) + assert.NilError(t, err) err = l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}) - require.NoError(t, err) + assert.NilError(t, err) res, err := ioutil.ReadFile(filename) - require.NoError(t, err) + assert.NilError(t, err) expected := `{"log":"line1\n","stream":"src1","attrs":{"tag":"a7317399f3f8/test-container"},"time":"0001-01-01T00:00:00Z"} {"log":"line2\n","stream":"src2","attrs":{"tag":"a7317399f3f8/test-container"},"time":"0001-01-01T00:00:00Z"} {"log":"line3\n","stream":"src3","attrs":{"tag":"a7317399f3f8/test-container"},"time":"0001-01-01T00:00:00Z"} ` - assert.Equal(t, expected, string(res)) + assert.Check(t, is.Equal(expected, string(res))) } func BenchmarkJSONFileLoggerLog(b *testing.B) { @@ -113,7 +114,7 @@ func BenchmarkJSONFileLoggerLog(b *testing.B) { "second": "label_foo", }, }) - require.NoError(b, err) + assert.NilError(b, err) defer jsonlogger.Close() msg := &logger.Message{ @@ -123,7 +124,7 @@ func BenchmarkJSONFileLoggerLog(b *testing.B) { } buf := bytes.NewBuffer(nil) - require.NoError(b, marshalMessage(msg, nil, buf)) + assert.NilError(b, marshalMessage(msg, nil, buf)) b.SetBytes(int64(buf.Len())) b.ResetTimer() @@ -142,7 +143,7 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { } defer os.RemoveAll(tmp) filename := filepath.Join(tmp, "container.log") - config := map[string]string{"max-file": "2", "max-size": "1k"} + config := map[string]string{"max-file": "3", "max-size": "1k", "compress": "true"} l, 
err := New(logger.Info{ ContainerID: cid, LogPath: filename, @@ -152,21 +153,55 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { t.Fatal(err) } defer l.Close() - for i := 0; i < 20; i++ { + for i := 0; i < 36; i++ { if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { t.Fatal(err) } } + res, err := ioutil.ReadFile(filename) if err != nil { t.Fatal(err) } + penUlt, err := ioutil.ReadFile(filename + ".1") + if err != nil { + if !os.IsNotExist(err) { + t.Fatal(err) + } + + file, err := os.Open(filename + ".1.gz") + defer file.Close() + if err != nil { + t.Fatal(err) + } + zipReader, err := gzip.NewReader(file) + defer zipReader.Close() + if err != nil { + t.Fatal(err) + } + penUlt, err = ioutil.ReadAll(zipReader) + if err != nil { + t.Fatal(err) + } + } + + file, err := os.Open(filename + ".2.gz") + defer file.Close() + if err != nil { + t.Fatal(err) + } + zipReader, err := gzip.NewReader(file) + defer zipReader.Close() + if err != nil { + t.Fatal(err) + } + antepenult, err := ioutil.ReadAll(zipReader) if err != nil { t.Fatal(err) } - expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} + expectedAntepenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} @@ -183,10 +218,27 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { {"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} ` - expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} + expectedPenultimate := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} {"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} 
{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line20\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line21\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line22\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line23\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line24\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line25\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line26\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line27\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line28\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line29\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line30\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line31\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +` + expected := `{"log":"line32\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line33\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line34\n","stream":"src1","time":"0001-01-01T00:00:00Z"} +{"log":"line35\n","stream":"src1","time":"0001-01-01T00:00:00Z"} ` if string(res) != expected { @@ -195,7 +247,9 @@ func TestJSONFileLoggerWithOpts(t *testing.T) { if string(penUlt) != expectedPenultimate { t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) } - + if string(antepenult) != expectedAntepenultimate { + t.Fatalf("Wrong log content: %q, expected %q", antepenult, expectedAntepenultimate) + } } func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go index 370112435..b3bfe6b18 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go @@ -3,12 +3,12 @@ package 
jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/js import ( "bytes" "encoding/json" + "fmt" "regexp" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestJSONLogsMarshalJSONBuf(t *testing.T) { @@ -35,8 +35,17 @@ func TestJSONLogsMarshalJSONBuf(t *testing.T) { for jsonLog, expression := range logs { var buf bytes.Buffer err := jsonLog.MarshalJSONBuf(&buf) - require.NoError(t, err) - assert.Regexp(t, regexp.MustCompile(expression), buf.String()) - assert.NoError(t, json.Unmarshal(buf.Bytes(), &map[string]interface{}{})) + assert.NilError(t, err) + + assert.Assert(t, regexP(buf.String(), expression)) + assert.NilError(t, json.Unmarshal(buf.Bytes(), &map[string]interface{}{})) + } +} + +func regexP(value string, pattern string) func() (bool, string) { + return func() (bool, string) { + re := regexp.MustCompile(pattern) + msg := fmt.Sprintf("%q did not match pattern %q", value, pattern) + return re.MatchString(value), msg } } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go index fee091eb6..76f299a0f 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestFastTimeMarshalJSONWithInvalidYear(t *testing.T) { @@ -22,14 +22,14 @@ func TestFastTimeMarshalJSONWithInvalidYear(t *testing.T) { func TestFastTimeMarshalJSON(t *testing.T) { aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) 
json, err := fastTimeMarshalJSON(aTime) - require.NoError(t, err) - assert.Equal(t, "\"2015-05-29T11:01:02.000000003Z\"", json) + assert.NilError(t, err) + assert.Check(t, is.Equal("\"2015-05-29T11:01:02.000000003Z\"", json)) location, err := time.LoadLocation("Europe/Paris") - require.NoError(t, err) + assert.NilError(t, err) aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) json, err = fastTimeMarshalJSON(aTime) - require.NoError(t, err) - assert.Equal(t, "\"2015-05-29T11:01:02.000000003+02:00\"", json) + assert.NilError(t, err) + assert.Check(t, is.Equal("\"2015-05-29T11:01:02.000000003+02:00\"", json)) } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go index 342b538c2..f89fabfe1 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read_test.go @@ -6,8 +6,8 @@ import ( "time" "github.com/docker/docker/daemon/logger" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/require" ) func BenchmarkJSONFileLoggerReadLogs(b *testing.B) { @@ -25,7 +25,7 @@ func BenchmarkJSONFileLoggerReadLogs(b *testing.B) { "second": "label_foo", }, }) - require.NoError(b, err) + assert.NilError(b, err) defer jsonlogger.Close() msg := &logger.Message{ @@ -35,7 +35,7 @@ func BenchmarkJSONFileLoggerReadLogs(b *testing.B) { } buf := bytes.NewBuffer(nil) - require.NoError(b, marshalMessage(msg, nil, buf)) + assert.NilError(b, marshalMessage(msg, nil, buf)) b.SetBytes(int64(buf.Len())) b.ResetTimer() diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index e646afc23..b4148ce64 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ 
b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -2,17 +2,21 @@ package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutil import ( "bytes" + "compress/gzip" "context" + "encoding/json" "fmt" "io" "os" "strconv" + "strings" "sync" "time" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils/multireader" "github.com/docker/docker/pkg/filenotify" + "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/pubsub" "github.com/docker/docker/pkg/tailfile" "github.com/fsnotify/fsnotify" @@ -20,24 +24,81 @@ import ( "github.com/sirupsen/logrus" ) +const tmpLogfileSuffix = ".tmp" + +// rotateFileMetadata is a metadata of the gzip header of the compressed log file +type rotateFileMetadata struct { + LastTime time.Time `json:"lastTime,omitempty"` +} + +// refCounter is a counter of logfile being referenced +type refCounter struct { + mu sync.Mutex + counter map[string]int +} + +// Reference increase the reference counter for specified logfile +func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) { + rc.mu.Lock() + defer rc.mu.Unlock() + + var ( + file *os.File + err error + ) + _, ok := rc.counter[fileName] + file, err = openRefFile(fileName, ok) + if err != nil { + return nil, err + } + + if ok { + rc.counter[fileName]++ + } else if file != nil { + rc.counter[file.Name()] = 1 + } + + return file, nil +} + +// Dereference reduce the reference counter for specified logfile +func (rc *refCounter) Dereference(fileName string) error { + rc.mu.Lock() + defer rc.mu.Unlock() + + rc.counter[fileName]-- + if rc.counter[fileName] <= 0 { + delete(rc.counter, fileName) + err := os.Remove(fileName) + if err != nil { + return err + } + } + return nil +} + // LogFile is Logger implementation for default Docker logging. 
type LogFile struct { - f *os.File // store for closing - closed bool - mu sync.RWMutex - capacity int64 //maximum size of each file - currentSize int64 // current size of the latest file - maxFiles int //maximum number of files - notifyRotate *pubsub.Publisher - marshal logger.MarshalFunc - createDecoder makeDecoderFunc - perms os.FileMode + mu sync.RWMutex // protects the logfile access + f *os.File // store for closing + closed bool + rotateMu sync.Mutex // blocks the next rotation until the current rotation is completed + capacity int64 // maximum size of each file + currentSize int64 // current size of the latest file + maxFiles int // maximum number of files + compress bool // whether old versions of log files are compressed + lastTimestamp time.Time // timestamp of the last log + filesRefCounter refCounter // keep reference-counted of decompressed files + notifyRotate *pubsub.Publisher + marshal logger.MarshalFunc + createDecoder makeDecoderFunc + perms os.FileMode } type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) //NewLogFile creates new LogFile -func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) { +func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) { log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) if err != nil { return nil, err @@ -49,14 +110,16 @@ func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger. 
} return &LogFile{ - f: log, - capacity: capacity, - currentSize: size, - maxFiles: maxFiles, - notifyRotate: pubsub.NewPublisher(0, 1), - marshal: marshaller, - createDecoder: decodeFunc, - perms: perms, + f: log, + capacity: capacity, + currentSize: size, + maxFiles: maxFiles, + compress: compress, + filesRefCounter: refCounter{counter: make(map[string]int)}, + notifyRotate: pubsub.NewPublisher(0, 1), + marshal: marshaller, + createDecoder: decodeFunc, + perms: perms, }, nil } @@ -84,6 +147,7 @@ func (w *LogFile) WriteLogEntry(msg *logger.Message) error { n, err := w.f.Write(b) if err == nil { w.currentSize += int64(n) + w.lastTimestamp = msg.Timestamp } w.mu.Unlock() return err @@ -95,43 +159,108 @@ func (w *LogFile) checkCapacityAndRotate() error { } if w.currentSize >= w.capacity { - name := w.f.Name() + w.rotateMu.Lock() + fname := w.f.Name() if err := w.f.Close(); err != nil { + w.rotateMu.Unlock() return errors.Wrap(err, "error closing file") } - if err := rotate(name, w.maxFiles); err != nil { + if err := rotate(fname, w.maxFiles, w.compress); err != nil { + w.rotateMu.Unlock() return err } - file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) + file, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) if err != nil { + w.rotateMu.Unlock() return err } w.f = file w.currentSize = 0 w.notifyRotate.Publish(struct{}{}) + + if w.maxFiles <= 1 || !w.compress { + w.rotateMu.Unlock() + return nil + } + + go func() { + compressFile(fname+".1", w.lastTimestamp) + w.rotateMu.Unlock() + }() } return nil } -func rotate(name string, maxFiles int) error { +func rotate(name string, maxFiles int, compress bool) error { if maxFiles < 2 { return nil } + + var extension string + if compress { + extension = ".gz" + } for i := maxFiles - 1; i > 1; i-- { - toPath := name + "." + strconv.Itoa(i) - fromPath := name + "." + strconv.Itoa(i-1) + toPath := name + "." + strconv.Itoa(i) + extension + fromPath := name + "." 
+ strconv.Itoa(i-1) + extension if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "error rotating old log entries") + return err } } if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "error rotating current log") + return err } + return nil } +func compressFile(fileName string, lastTimestamp time.Time) { + file, err := os.Open(fileName) + if err != nil { + logrus.Errorf("Failed to open log file: %v", err) + return + } + defer func() { + file.Close() + err := os.Remove(fileName) + if err != nil { + logrus.Errorf("Failed to remove source log file: %v", err) + } + }() + + outFile, err := os.OpenFile(fileName+".gz", os.O_CREATE|os.O_RDWR, 0640) + if err != nil { + logrus.Errorf("Failed to open or create gzip log file: %v", err) + return + } + defer func() { + outFile.Close() + if err != nil { + os.Remove(fileName + ".gz") + } + }() + + compressWriter := gzip.NewWriter(outFile) + defer compressWriter.Close() + + // Add the last log entry timestramp to the gzip header + extra := rotateFileMetadata{} + extra.LastTime = lastTimestamp + compressWriter.Header.Extra, err = json.Marshal(&extra) + if err != nil { + // Here log the error only and don't return since this is just an optimization. 
+ logrus.Warningf("Failed to marshal JSON: %v", err) + } + + _, err = pools.Copy(compressWriter, file) + if err != nil { + logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file") + return + } +} + // MaxFiles return maximum number of files func (w *LogFile) MaxFiles() int { return w.maxFiles @@ -154,18 +283,6 @@ func (w *LogFile) Close() error { // ReadLogs decodes entries from log files and sends them the passed in watcher func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) { w.mu.RLock() - files, err := w.openRotatedFiles() - if err != nil { - w.mu.RUnlock() - watcher.Err <- err - return - } - defer func() { - for _, f := range files { - f.Close() - } - }() - currentFile, err := os.Open(w.f.Name()) if err != nil { w.mu.RUnlock() @@ -175,14 +292,20 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) defer currentFile.Close() currentChunk, err := newSectionReader(currentFile) - w.mu.RUnlock() - if err != nil { + w.mu.RUnlock() watcher.Err <- err return } if config.Tail != 0 { + files, err := w.openRotatedFiles(config) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + w.mu.RUnlock() seekers := make([]io.ReadSeeker, 0, len(files)+1) for _, f := range files { seekers = append(seekers, f) @@ -193,9 +316,20 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) if len(seekers) > 0 { tailFile(multireader.MultiReadSeeker(seekers...), watcher, w.createDecoder, config) } + for _, f := range files { + f.Close() + fileName := f.Name() + if strings.HasSuffix(fileName, tmpLogfileSuffix) { + err := w.filesRefCounter.Dereference(fileName) + if err != nil { + logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err) + } + } + } + + w.mu.RLock() } - w.mu.RLock() if !config.Follow || w.closed { w.mu.RUnlock() return @@ -207,13 +341,22 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig, 
watcher *logger.LogWatcher) followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) } -func (w *LogFile) openRotatedFiles() (files []*os.File, err error) { +func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) { + w.rotateMu.Lock() + defer w.rotateMu.Unlock() + defer func() { if err == nil { return } for _, f := range files { f.Close() + if strings.HasSuffix(f.Name(), tmpLogfileSuffix) { + err := os.Remove(f.Name()) + if err != nil && !os.IsNotExist(err) { + logrus.Warningf("Failed to remove the logfile %q: %v", f.Name, err) + } + } } }() @@ -223,6 +366,28 @@ func (w *LogFile) openRotatedFiles() (files []*os.File, err error) { if !os.IsNotExist(err) { return nil, err } + + fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1) + decompressedFileName := fileName + tmpLogfileSuffix + tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) { + if exists { + return os.Open(refFileName) + } + return decompressfile(fileName, refFileName, config.Since) + }) + + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + continue + } + if tmpFile == nil { + // The log before `config.Since` does not need to read + break + } + + files = append(files, tmpFile) continue } files = append(files, f) @@ -231,6 +396,44 @@ func (w *LogFile) openRotatedFiles() (files []*os.File, err error) { return files, nil } +func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) { + cf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer cf.Close() + + rc, err := gzip.NewReader(cf) + if err != nil { + return nil, err + } + defer rc.Close() + + // Extract the last log entry timestramp from the gzip header + extra := &rotateFileMetadata{} + err = json.Unmarshal(rc.Header.Extra, extra) + if err == nil && extra.LastTime.Before(since) { + return nil, nil + } + + rs, err := os.OpenFile(destFileName, 
os.O_CREATE|os.O_RDWR, 0640) + if err != nil { + return nil, err + } + + _, err = pools.Copy(rs, rc) + if err != nil { + rs.Close() + rErr := os.Remove(rs.Name()) + if rErr != nil && os.IsNotExist(rErr) { + logrus.Errorf("Failed to remove the logfile %q: %v", rs.Name(), rErr) + } + return nil, err + } + + return rs, nil +} + func newSectionReader(f *os.File) (*io.SectionReader, error) { // seek to the end to get the size // we'll leave this at the end of the file since section reader does not advance the reader diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go index 9744cb561..62895a6dd 100644 --- a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk_test.go @@ -11,8 +11,8 @@ import ( "time" "github.com/docker/docker/daemon/logger" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/env" - "github.com/stretchr/testify/require" ) // Validate options @@ -99,19 +99,19 @@ func TestNewWithProxy(t *testing.T) { }, ContainerID: "containeriid", }) - require.NoError(t, err) + assert.NilError(t, err) splunkLogger := logger.(*splunkLoggerInline) proxyFunc := splunkLogger.transport.Proxy - require.NotNil(t, proxyFunc) + assert.Assert(t, proxyFunc != nil) req, err := http.NewRequest("GET", splunkURL, nil) - require.NoError(t, err) + assert.NilError(t, err) proxyURL, err := proxyFunc(req) - require.NoError(t, err) - require.NotNil(t, proxyURL) - require.Equal(t, proxy, proxyURL.String()) + assert.NilError(t, err) + assert.Assert(t, proxyURL != nil) + assert.Equal(t, proxy, proxyURL.String()) } // Test default settings @@ -483,10 +483,10 @@ func TestRawFormat(t *testing.T) { } hostname, err := info.Hostname() - require.NoError(t, err) + assert.NilError(t, err) loggerDriver, err := New(info) - require.NoError(t, err) + assert.NilError(t, err) if 
!hec.connectionVerified { t.Fatal("By default connection should be verified") diff --git a/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go b/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go index 5e71d96f2..b76703747 100644 --- a/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go +++ b/vendor/github.com/docker/docker/daemon/logger/templates/templates_test.go @@ -4,15 +4,16 @@ import ( "bytes" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestNewParse(t *testing.T) { tm, err := NewParse("foo", "this is a {{ . }}") - assert.NoError(t, err) + assert.Check(t, err) var b bytes.Buffer - assert.NoError(t, tm.Execute(&b, "string")) + assert.Check(t, tm.Execute(&b, "string")) want := "this is a string" - assert.Equal(t, want, b.String()) + assert.Check(t, is.Equal(want, b.String())) } diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go index e5dcd0686..0f2e9c252 100644 --- a/vendor/github.com/docker/docker/daemon/network.go +++ b/vendor/github.com/docker/docker/daemon/network.go @@ -222,6 +222,8 @@ func (daemon *Daemon) releaseIngress(id string) { return } + daemon.deleteLoadBalancerSandbox(n) + if err := n.Delete(); err != nil { logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) return diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go index 8d5eebb88..a83f155fd 100644 --- a/vendor/github.com/docker/docker/daemon/oci_linux.go +++ b/vendor/github.com/docker/docker/daemon/oci_linux.go @@ -255,7 +255,7 @@ func setCapabilities(s *specs.Spec, c *container.Container) error { if c.HostConfig.Privileged { caplist = caps.GetAllCapabilities() } else { - caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Effective, c.HostConfig.CapAdd, 
c.HostConfig.CapDrop) + caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Bounding, c.HostConfig.CapAdd, c.HostConfig.CapDrop) if err != nil { return err } @@ -264,6 +264,12 @@ func setCapabilities(s *specs.Spec, c *container.Container) error { s.Process.Capabilities.Bounding = caplist s.Process.Capabilities.Permitted = caplist s.Process.Capabilities.Inheritable = caplist + // setUser has already been executed here + // if non root drop capabilities in the way execve does + if s.Process.User.UID != 0 { + s.Process.Capabilities.Effective = []string{} + s.Process.Capabilities.Permitted = []string{} + } return nil } @@ -705,6 +711,9 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c } func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { + if c.BaseFS == nil { + return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly nil") + } linkedEnv, err := daemon.setupLinkedContainers(c) if err != nil { return err diff --git a/vendor/github.com/docker/docker/daemon/oci_linux_test.go b/vendor/github.com/docker/docker/daemon/oci_linux_test.go index f6bda7974..5f2731b8d 100644 --- a/vendor/github.com/docker/docker/daemon/oci_linux_test.go +++ b/vendor/github.com/docker/docker/daemon/oci_linux_test.go @@ -8,8 +8,8 @@ import ( "github.com/docker/docker/daemon/config" "github.com/docker/docker/oci" "github.com/docker/docker/pkg/idtools" - - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) // TestTmpfsDevShmNoDupMount checks that a user-specified /dev/shm tmpfs @@ -36,17 +36,17 @@ func TestTmpfsDevShmNoDupMount(t *testing.T) { // Mimick the code flow of daemon.createSpec(), enough to reproduce the issue ms, err := d.setupMounts(c) - assert.NoError(t, err) + assert.Check(t, err) ms = append(ms, c.IpcMounts()...) 
tmpfsMounts, err := c.TmpfsMounts() - assert.NoError(t, err) + assert.Check(t, err) ms = append(ms, tmpfsMounts...) s := oci.DefaultSpec() err = setMounts(&d, &s, c, ms) - assert.NoError(t, err) + assert.Check(t, err) } // TestIpcPrivateVsReadonly checks that in case of IpcMode: private @@ -70,19 +70,19 @@ func TestIpcPrivateVsReadonly(t *testing.T) { // We can't call createSpec() so mimick the minimal part // of its code flow, just enough to reproduce the issue. ms, err := d.setupMounts(c) - assert.NoError(t, err) + assert.Check(t, err) s := oci.DefaultSpec() s.Root.Readonly = c.HostConfig.ReadonlyRootfs err = setMounts(&d, &s, c, ms) - assert.NoError(t, err) + assert.Check(t, err) // Find the /dev/shm mount in ms, check it does not have ro for _, m := range s.Mounts { if m.Destination != "/dev/shm" { continue } - assert.Equal(t, false, inSlice(m.Options, "ro")) + assert.Check(t, is.Equal(false, inSlice(m.Options, "ro"))) } } diff --git a/vendor/github.com/docker/docker/daemon/oci_windows.go b/vendor/github.com/docker/docker/daemon/oci_windows.go index e2e10f999..edea2bced 100644 --- a/vendor/github.com/docker/docker/daemon/oci_windows.go +++ b/vendor/github.com/docker/docker/daemon/oci_windows.go @@ -234,6 +234,10 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S s.Root.Readonly = false // Windows does not support a read-only root filesystem if !isHyperV { + if c.BaseFS == nil { + return errors.New("createSpecWindowsFields: BaseFS of container " + c.ID + " is unexpectedly nil") + } + s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers if !strings.HasSuffix(s.Root.Path, `\`) { s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\ diff --git a/vendor/github.com/docker/docker/daemon/reload_test.go b/vendor/github.com/docker/docker/daemon/reload_test.go index a2500b2bb..9174bfba5 100644 --- a/vendor/github.com/docker/docker/daemon/reload_test.go +++ 
b/vendor/github.com/docker/docker/daemon/reload_test.go @@ -12,7 +12,8 @@ import ( _ "github.com/docker/docker/pkg/discovery/memory" "github.com/docker/docker/registry" "github.com/docker/libnetwork" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestDaemonReloadLabels(t *testing.T) { @@ -97,7 +98,7 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { sort.Strings(registries) sort.Strings(actual) - assert.Equal(t, registries, actual) + assert.Check(t, is.DeepEqual(registries, actual)) } func TestDaemonReloadMirrors(t *testing.T) { diff --git a/vendor/github.com/docker/docker/daemon/stats/collector.go b/vendor/github.com/docker/docker/daemon/stats/collector.go index 6b7479dfd..88e20984b 100644 --- a/vendor/github.com/docker/docker/daemon/stats/collector.go +++ b/vendor/github.com/docker/docker/daemon/stats/collector.go @@ -91,6 +91,10 @@ func (s *Collector) Run() { var pairs []publishersPair for { + // Put sleep at the start so that it will always be hit, + // preventing a tight loop if no stats are collected. 
+ time.Sleep(s.interval) + // it does not make sense in the first iteration, // but saves allocations in further iterations pairs = pairs[:0] @@ -141,8 +145,6 @@ func (s *Collector) Run() { logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) } } - - time.Sleep(s.interval) } } diff --git a/vendor/github.com/docker/docker/daemon/trustkey_test.go b/vendor/github.com/docker/docker/daemon/trustkey_test.go index c49341d2a..ebc7e28ee 100644 --- a/vendor/github.com/docker/docker/daemon/trustkey_test.go +++ b/vendor/github.com/docker/docker/daemon/trustkey_test.go @@ -7,19 +7,19 @@ import ( "testing" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // LoadOrCreateTrustKey func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmpKeyFolderPath) tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") - require.NoError(t, err) + assert.NilError(t, err) _, err = loadOrCreateTrustKey(tmpKeyFile.Name()) testutil.ErrorContains(t, err, "Error loading key file") @@ -33,11 +33,11 @@ func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) { tmpKeyFile := tmpKeyFolderPath.Join("keyfile") key, err := loadOrCreateTrustKey(tmpKeyFile) - require.NoError(t, err) - assert.NotNil(t, key) + assert.NilError(t, err) + assert.Check(t, key != nil) _, err = os.Stat(tmpKeyFile) - require.NoError(t, err, "key file doesn't exist") + assert.NilError(t, err, "key file doesn't exist") } func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) { @@ -46,27 +46,27 @@ func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) { tmpKeyFile := 
tmpKeyFolderPath.Join("folder/hierarchy/keyfile") key, err := loadOrCreateTrustKey(tmpKeyFile) - require.NoError(t, err) - assert.NotNil(t, key) + assert.NilError(t, err) + assert.Check(t, key != nil) _, err = os.Stat(tmpKeyFile) - require.NoError(t, err, "key file doesn't exist") + assert.NilError(t, err, "key file doesn't exist") } func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) { defer os.Remove("keyfile") key, err := loadOrCreateTrustKey("keyfile") - require.NoError(t, err) - assert.NotNil(t, key) + assert.NilError(t, err) + assert.Check(t, key != nil) _, err = os.Stat("keyfile") - require.NoError(t, err, "key file doesn't exist") + assert.NilError(t, err, "key file doesn't exist") } func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { tmpKeyFile := filepath.Join("testdata", "keyfile") key, err := loadOrCreateTrustKey(tmpKeyFile) - require.NoError(t, err) + assert.NilError(t, err) expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY" - assert.Contains(t, key.String(), expected) + assert.Check(t, is.Contains(key.String(), expected)) } diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go index 8a3647b3b..7bac8e821 100644 --- a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go +++ b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/docker/docker/layer" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestV1IDService(t *testing.T) { @@ -24,7 +24,7 @@ func TestV1IDService(t *testing.T) { ns := v1IDService.namespace() - require.Equal(t, "v1id", ns) + assert.Equal(t, "v1id", ns) testVectors := []struct { registry string diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go index 7b7155169..f7b9a6d65 
100644 --- a/vendor/github.com/docker/docker/distribution/push_v2.go +++ b/vendor/github.com/docker/docker/distribution/push_v2.go @@ -15,6 +15,7 @@ import ( "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/client" apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/distribution/metadata" @@ -55,12 +56,14 @@ type pushState struct { // confirmedV2 is set to true if we confirm we're talking to a v2 // registry. This is used to limit fallbacks to the v1 protocol. confirmedV2 bool + hasAuthInfo bool } func (p *v2Pusher) Push(ctx context.Context) (err error) { p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") + p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "") if err != nil { logrus.Debugf("Error getting v2 registry: %v", err) return err @@ -308,6 +311,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. // Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) + isUnauthorizedError := false for _, mountCandidate := range candidates { logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) createOpts := []distribution.BlobCreateOption{} @@ -360,11 +364,26 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. 
return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} } return err.Descriptor, nil + case errcode.Errors: + for _, e := range err { + switch e := e.(type) { + case errcode.Error: + if e.Code == errcode.ErrorCodeUnauthorized { + // when unauthorized error that indicate user don't has right to push layer to register + logrus.Debugln("failed to push layer to registry because unauthorized error") + isUnauthorizedError = true + } + default: + } + } default: logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) } + // when error is unauthorizedError and user don't hasAuthInfo that's the case user don't has right to push layer to register + // and he hasn't login either, in this case candidate cache should be removed if len(mountCandidate.SourceRepository) > 0 && + !(isUnauthorizedError && !pd.pushState.hasAuthInfo) && (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || len(mountCandidate.HMAC) == 0) { cause := "blob mount failure" @@ -398,7 +417,6 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. 
} } defer layerUpload.Close() - // upload the blob return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) } diff --git a/vendor/github.com/docker/docker/distribution/push_v2_test.go b/vendor/github.com/docker/docker/distribution/push_v2_test.go index ac68470b6..c3616b936 100644 --- a/vendor/github.com/docker/docker/distribution/push_v2_test.go +++ b/vendor/github.com/docker/docker/distribution/push_v2_test.go @@ -2,6 +2,7 @@ package distribution // import "github.com/docker/docker/distribution" import ( "net/http" + "net/url" "reflect" "testing" @@ -9,9 +10,13 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/api/types" "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/progress" + refstore "github.com/docker/docker/reference" + "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" ) @@ -461,6 +466,158 @@ func TestLayerAlreadyExists(t *testing.T) { } } +type mockReferenceStore struct { +} + +func (s *mockReferenceStore) References(id digest.Digest) []reference.Named { + return []reference.Named{} +} +func (s *mockReferenceStore) ReferencesByName(ref reference.Named) []refstore.Association { + return []refstore.Association{} +} +func (s *mockReferenceStore) AddTag(ref reference.Named, id digest.Digest, force bool) error { + return nil +} +func (s *mockReferenceStore) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { + return nil +} +func (s *mockReferenceStore) Delete(ref reference.Named) (bool, error) { + return true, nil +} +func (s *mockReferenceStore) Get(ref reference.Named) (digest.Digest, error) { + return "", nil +} + +func TestWhenEmptyAuthConfig(t *testing.T) { + for _, authInfo := range []struct { + username string + password string + registryToken string + 
expected bool + }{ + { + username: "", + password: "", + registryToken: "", + expected: false, + }, + { + username: "username", + password: "password", + registryToken: "", + expected: true, + }, + { + username: "", + password: "", + registryToken: "token", + expected: true, + }, + } { + imagePushConfig := &ImagePushConfig{} + imagePushConfig.AuthConfig = &types.AuthConfig{ + Username: authInfo.username, + Password: authInfo.password, + RegistryToken: authInfo.registryToken, + } + imagePushConfig.ReferenceStore = &mockReferenceStore{} + repoInfo, _ := reference.ParseNormalizedNamed("xujihui1985/test.img") + pusher := &v2Pusher{ + config: imagePushConfig, + repoInfo: ®istry.RepositoryInfo{ + Name: repoInfo, + }, + endpoint: registry.APIEndpoint{ + URL: &url.URL{ + Scheme: "https", + Host: "index.docker.io", + }, + Version: registry.APIVersion1, + TrimHostname: true, + }, + } + pusher.Push(context.Background()) + if pusher.pushState.hasAuthInfo != authInfo.expected { + t.Errorf("hasAuthInfo does not match expected: %t != %t", authInfo.expected, pusher.pushState.hasAuthInfo) + } + } +} + +type mockBlobStoreWithCreate struct { + mockBlobStore + repo *mockRepoWithBlob +} + +func (blob *mockBlobStoreWithCreate) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + return nil, errcode.Errors(append([]error{errcode.ErrorCodeUnauthorized.WithMessage("unauthorized")})) +} + +type mockRepoWithBlob struct { + mockRepo +} + +func (m *mockRepoWithBlob) Blobs(ctx context.Context) distribution.BlobStore { + blob := &mockBlobStoreWithCreate{} + blob.mockBlobStore.repo = &m.mockRepo + blob.repo = m + return blob +} + +type mockMetadataService struct { + mockV2MetadataService +} + +func (m *mockMetadataService) GetMetadata(diffID layer.DiffID) ([]metadata.V2Metadata, error) { + return []metadata.V2Metadata{ + taggedMetadata("abcd", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e28", "docker.io/user/app1"), + 
taggedMetadata("abcd", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e22", "docker.io/user/app/base"), + taggedMetadata("hash", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e23", "docker.io/user/app"), + taggedMetadata("abcd", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e24", "127.0.0.1/user/app"), + taggedMetadata("hash", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e25", "docker.io/user/foo"), + taggedMetadata("hash", "sha256:ff3a5c916c92643ff77519ffa742d3ec61b7f591b6b7504599d95a4a41134e26", "docker.io/app/bar"), + }, nil +} + +var removeMetadata bool + +func (m *mockMetadataService) Remove(metadata metadata.V2Metadata) error { + removeMetadata = true + return nil +} + +func TestPushRegistryWhenAuthInfoEmpty(t *testing.T) { + repoInfo, _ := reference.ParseNormalizedNamed("user/app") + ms := &mockMetadataService{} + remoteErrors := map[digest.Digest]error{digest.Digest("sha256:apple"): distribution.ErrAccessDenied} + remoteBlobs := map[digest.Digest]distribution.Descriptor{digest.Digest("sha256:apple"): {Digest: digest.Digest("shar256:apple")}} + repo := &mockRepoWithBlob{ + mockRepo: mockRepo{ + t: t, + errors: remoteErrors, + blobs: remoteBlobs, + requests: []string{}, + }, + } + pd := &v2PushDescriptor{ + hmacKey: []byte("abcd"), + repoInfo: repoInfo, + layer: &storeLayer{ + Layer: layer.EmptyLayer, + }, + repo: repo, + v2MetadataService: ms, + pushState: &pushState{ + remoteLayers: make(map[layer.DiffID]distribution.Descriptor), + hasAuthInfo: false, + }, + checkedDigests: make(map[digest.Digest]struct{}), + } + pd.Upload(context.Background(), &progressSink{t}) + if removeMetadata { + t.Fatalf("expect remove not be called but called") + } +} + func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { meta := metadata.V2Metadata{ Digest: digest.Digest(dgst), diff --git 
a/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer b/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer index ed9ea7cbc..bc2f92a63 100755 --- a/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer +++ b/vendor/github.com/docker/docker/hack/dockerfile/install/proxy.installer @@ -3,7 +3,7 @@ # LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When # updating the binary version, consider updating github.com/docker/libnetwork # in vendor.conf accordingly -LIBNETWORK_COMMIT=ed2130d117c11c542327b4d5216a5db36770bc65 +LIBNETWORK_COMMIT=1b91bc94094ecfdae41daa465cc0c8df37dfb3dd install_proxy() { case "$1" in diff --git a/vendor/github.com/docker/docker/hack/test/e2e-run.sh b/vendor/github.com/docker/docker/hack/test/e2e-run.sh index b80f7fc31..b1470d654 100755 --- a/vendor/github.com/docker/docker/hack/test/e2e-run.sh +++ b/vendor/github.com/docker/docker/hack/test/e2e-run.sh @@ -13,8 +13,8 @@ export DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:-${ARCH}} : ${TESTDEBUG:=} integration_api_dirs=${TEST_INTEGRATION_DIR:-"$( - find ./integration -type d | - grep -vE '(^./integration($|/internal)|/testdata)')"} + find /tests/integration -type d | + grep -vE '(^/tests/integration($|/internal)|/testdata)')"} run_test_integration() { [[ "$TESTFLAGS" != *-check.f* ]] && run_test_integration_suites @@ -35,7 +35,7 @@ run_test_integration_suites() { run_test_integration_legacy_suites() { ( flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS" - cd test/integration-cli + cd /tests/integration-cli echo "Running $PWD" test_env ./test.main $flags ) diff --git a/vendor/github.com/docker/docker/image/fs_test.go b/vendor/github.com/docker/docker/image/fs_test.go index 6a634377e..dcf4da75f 100644 --- a/vendor/github.com/docker/docker/image/fs_test.go +++ b/vendor/github.com/docker/docker/image/fs_test.go @@ -11,16 +11,17 @@ import ( "testing" "github.com/docker/docker/internal/testutil" + 
"github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" digest "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" ) func defaultFSStoreBackend(t *testing.T) (StoreBackend, func()) { tmpdir, err := ioutil.TempDir("", "images-fs-store") - assert.NoError(t, err) + assert.Check(t, err) fsBackend, err := NewFSStoreBackend(tmpdir) - assert.NoError(t, err) + assert.Check(t, err) return fsBackend, func() { os.RemoveAll(tmpdir) } } @@ -30,12 +31,12 @@ func TestFSGetInvalidData(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foobar")) - assert.NoError(t, err) + assert.Check(t, err) dgst := digest.Digest(id) err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600) - assert.NoError(t, err) + assert.Check(t, err) _, err = store.Get(id) testutil.ErrorContains(t, err, "failed to verify") @@ -47,7 +48,7 @@ func TestFSInvalidSet(t *testing.T) { id := digest.FromBytes([]byte("foobar")) err := os.Mkdir(filepath.Join(store.(*fs).root, contentDirName, string(id.Algorithm()), id.Hex()), 0700) - assert.NoError(t, err) + assert.Check(t, err) _, err = store.Set([]byte("foobar")) testutil.ErrorContains(t, err, "failed to write digest data") @@ -55,7 +56,7 @@ func TestFSInvalidSet(t *testing.T) { func TestFSInvalidRoot(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") - assert.NoError(t, err) + assert.Check(t, err) defer os.RemoveAll(tmpdir) tcases := []struct { @@ -70,10 +71,10 @@ func TestFSInvalidRoot(t *testing.T) { root := filepath.Join(tmpdir, tc.root) filePath := filepath.Join(tmpdir, tc.invalidFile) err := os.MkdirAll(filepath.Dir(filePath), 0700) - assert.NoError(t, err) + assert.Check(t, err) f, err := os.Create(filePath) - assert.NoError(t, err) + assert.Check(t, err) f.Close() _, err = NewFSStoreBackend(root) @@ -89,10 +90,10 @@ func TestFSMetadataGetSet(t *testing.T) { defer cleanup() id, err 
:= store.Set([]byte("foo")) - assert.NoError(t, err) + assert.Check(t, err) id2, err := store.Set([]byte("bar")) - assert.NoError(t, err) + assert.Check(t, err) tcases := []struct { id digest.Digest @@ -106,12 +107,12 @@ func TestFSMetadataGetSet(t *testing.T) { for _, tc := range tcases { err = store.SetMetadata(tc.id, tc.key, tc.value) - assert.NoError(t, err) + assert.Check(t, err) actual, err := store.GetMetadata(tc.id, tc.key) - assert.NoError(t, err) + assert.Check(t, err) - assert.Equal(t, tc.value, actual) + assert.Check(t, is.DeepEqual(tc.value, actual)) } _, err = store.GetMetadata(id2, "tkey2") @@ -130,19 +131,19 @@ func TestFSInvalidWalker(t *testing.T) { defer cleanup() fooID, err := store.Set([]byte("foo")) - assert.NoError(t, err) + assert.Check(t, err) err = ioutil.WriteFile(filepath.Join(store.(*fs).root, contentDirName, "sha256/foobar"), []byte("foobar"), 0600) - assert.NoError(t, err) + assert.Check(t, err) n := 0 err = store.Walk(func(id digest.Digest) error { - assert.Equal(t, fooID, id) + assert.Check(t, is.Equal(fooID, id)) n++ return nil }) - assert.NoError(t, err) - assert.Equal(t, 1, n) + assert.Check(t, err) + assert.Check(t, is.Equal(1, n)) } func TestFSGetSet(t *testing.T) { @@ -159,12 +160,12 @@ func TestFSGetSet(t *testing.T) { randomInput := make([]byte, 8*1024) _, err := rand.Read(randomInput) - assert.NoError(t, err) + assert.Check(t, err) // skipping use of digest pkg because it is used by the implementation h := sha256.New() _, err = h.Write(randomInput) - assert.NoError(t, err) + assert.Check(t, err) tcases = append(tcases, tcase{ input: randomInput, @@ -173,14 +174,14 @@ func TestFSGetSet(t *testing.T) { for _, tc := range tcases { id, err := store.Set([]byte(tc.input)) - assert.NoError(t, err) - assert.Equal(t, tc.expected, id) + assert.Check(t, err) + assert.Check(t, is.Equal(tc.expected, id)) } for _, tc := range tcases { data, err := store.Get(tc.expected) - assert.NoError(t, err) - assert.Equal(t, tc.input, data) + 
assert.Check(t, err) + assert.Check(t, is.DeepEqual(tc.input, data)) } } @@ -209,22 +210,22 @@ func TestFSDelete(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foo")) - assert.NoError(t, err) + assert.Check(t, err) id2, err := store.Set([]byte("bar")) - assert.NoError(t, err) + assert.Check(t, err) err = store.Delete(id) - assert.NoError(t, err) + assert.Check(t, err) _, err = store.Get(id) testutil.ErrorContains(t, err, "failed to get digest") _, err = store.Get(id2) - assert.NoError(t, err) + assert.Check(t, err) err = store.Delete(id2) - assert.NoError(t, err) + assert.Check(t, err) _, err = store.Get(id2) testutil.ErrorContains(t, err, "failed to get digest") @@ -235,10 +236,10 @@ func TestFSWalker(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foo")) - assert.NoError(t, err) + assert.Check(t, err) id2, err := store.Set([]byte("bar")) - assert.NoError(t, err) + assert.Check(t, err) tcases := make(map[digest.Digest]struct{}) tcases[id] = struct{}{} @@ -249,9 +250,9 @@ func TestFSWalker(t *testing.T) { n++ return nil }) - assert.NoError(t, err) - assert.Equal(t, 2, n) - assert.Len(t, tcases, 0) + assert.Check(t, err) + assert.Check(t, is.Equal(2, n)) + assert.Check(t, is.Len(tcases, 0)) } func TestFSWalkerStopOnError(t *testing.T) { @@ -259,7 +260,7 @@ func TestFSWalkerStopOnError(t *testing.T) { defer cleanup() id, err := store.Set([]byte("foo")) - assert.NoError(t, err) + assert.Check(t, err) tcases := make(map[digest.Digest]struct{}) tcases[id] = struct{}{} diff --git a/vendor/github.com/docker/docker/image/image_test.go b/vendor/github.com/docker/docker/image/image_test.go index 429f20297..dfb438b4d 100644 --- a/vendor/github.com/docker/docker/image/image_test.go +++ b/vendor/github.com/docker/docker/image/image_test.go @@ -9,8 +9,9 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/layer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + 
"github.com/google/go-cmp/cmp" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) const sampleImageJSON = `{ @@ -25,13 +26,13 @@ const sampleImageJSON = `{ func TestNewFromJSON(t *testing.T) { img, err := NewFromJSON([]byte(sampleImageJSON)) - require.NoError(t, err) - assert.Equal(t, sampleImageJSON, string(img.RawJSON())) + assert.NilError(t, err) + assert.Check(t, is.Equal(sampleImageJSON, string(img.RawJSON()))) } func TestNewFromJSONWithInvalidJSON(t *testing.T) { _, err := NewFromJSON([]byte("{}")) - assert.EqualError(t, err, "invalid image JSON, no RootFS key") + assert.Check(t, is.Error(err, "invalid image JSON, no RootFS key")) } func TestMarshalKeyOrder(t *testing.T) { @@ -42,7 +43,7 @@ func TestMarshalKeyOrder(t *testing.T) { Architecture: "c", }, }) - assert.NoError(t, err) + assert.Check(t, err) expectedOrder := []string{"architecture", "author", "comment"} var indexes []int @@ -71,10 +72,10 @@ func TestImage(t *testing.T) { computedID: ID(cid), } - assert.Equal(t, cid, img.ImageID()) - assert.Equal(t, cid, img.ID().String()) - assert.Equal(t, os, img.OperatingSystem()) - assert.Equal(t, config, img.RunConfig()) + assert.Check(t, is.Equal(cid, img.ImageID())) + assert.Check(t, is.Equal(cid, img.ID().String())) + assert.Check(t, is.Equal(os, img.OperatingSystem())) + assert.Check(t, is.DeepEqual(config, img.RunConfig())) } func TestImageOSNotEmpty(t *testing.T) { @@ -85,7 +86,7 @@ func TestImageOSNotEmpty(t *testing.T) { }, OSVersion: "osversion", } - assert.Equal(t, os, img.OperatingSystem()) + assert.Check(t, is.Equal(os, img.OperatingSystem())) } func TestNewChildImageFromImageWithRootFS(t *testing.T) { @@ -109,16 +110,16 @@ func TestNewChildImageFromImageWithRootFS(t *testing.T) { newImage := NewChildImage(parent, childConfig, "platform") expectedDiffIDs := []layer.DiffID{layer.DiffID("ba5e"), layer.DiffID("abcdef")} - assert.Equal(t, expectedDiffIDs, newImage.RootFS.DiffIDs) - 
assert.Equal(t, childConfig.Author, newImage.Author) - assert.Equal(t, childConfig.Config, newImage.Config) - assert.Equal(t, *childConfig.ContainerConfig, newImage.ContainerConfig) - assert.Equal(t, "platform", newImage.OS) - assert.Equal(t, childConfig.Config, newImage.Config) + assert.Check(t, is.DeepEqual(expectedDiffIDs, newImage.RootFS.DiffIDs)) + assert.Check(t, is.Equal(childConfig.Author, newImage.Author)) + assert.Check(t, is.DeepEqual(childConfig.Config, newImage.Config)) + assert.Check(t, is.DeepEqual(*childConfig.ContainerConfig, newImage.ContainerConfig)) + assert.Check(t, is.Equal("platform", newImage.OS)) + assert.Check(t, is.DeepEqual(childConfig.Config, newImage.Config)) - assert.Len(t, newImage.History, 2) - assert.Equal(t, childConfig.Comment, newImage.History[1].Comment) + assert.Check(t, is.Len(newImage.History, 2)) + assert.Check(t, is.Equal(childConfig.Comment, newImage.History[1].Comment)) - // RootFS should be copied not mutated - assert.NotEqual(t, parent.RootFS.DiffIDs, newImage.RootFS.DiffIDs) + assert.Check(t, !cmp.Equal(parent.RootFS.DiffIDs, newImage.RootFS.DiffIDs), + "RootFS should be copied not mutated") } diff --git a/vendor/github.com/docker/docker/image/spec/README.md b/vendor/github.com/docker/docker/image/spec/README.md new file mode 100644 index 000000000..9769af781 --- /dev/null +++ b/vendor/github.com/docker/docker/image/spec/README.md @@ -0,0 +1,46 @@ +# Docker Image Specification v1. + +This directory contains documents about Docker Image Specification v1.X. + +The v1 file layout and manifests are no longer used in Moby and Docker, except in `docker save` and `docker load`. + +However, v1 Image JSON (`application/vnd.docker.container.image.v1+json`) has been still widely +used and officially adopted in [V2 manifest](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) +and in [OCI Image Format Specification](https://github.com/opencontainers/image-spec). 
+ +## v1.X rough Changelog + +All 1.X versions are compatible with older ones. + +### [v1.2](v1.2.md) + +* Implemented in Docker v1.12 (July, 2016) +* The official spec document was written in August 2016 ([#25750](https://github.com/moby/moby/pull/25750)) + +Changes: + +* `Healthcheck` struct was added to Image JSON + +### [v1.1](v1.1.md) + +* Implemented in Docker v1.10 (February, 2016) +* The official spec document was written in April 2016 ([#22264](https://github.com/moby/moby/pull/22264)) + +Changes: + +* IDs were made into SHA256 digest values rather than random values +* Layer directory names were made into deterministic values rather than random ID values +* `manifest.json` was added + +### [v1](v1.md) + +* The initial revision +* The official spec document was written in late 2014 ([#9560](https://github.com/moby/moby/pull/9560)), but actual implementations had existed even earlier + + +## Related specifications + +* [Open Containers Initiative (OCI) Image Format Specification v1.0.0](https://github.com/opencontainers/image-spec/tree/v1.0.0) +* [Docker Image Manifest Version 2, Schema 2](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) +* [Docker Image Manifest Version 2, Schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) (*DEPRECATED*) +* [Docker Registry HTTP API V2](https://docs.docker.com/registry/spec/api/) diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go index a8f8cee5b..9fd7d7dcf 100644 --- a/vendor/github.com/docker/docker/image/store.go +++ b/vendor/github.com/docker/docker/image/store.go @@ -27,6 +27,7 @@ type Store interface { Children(id ID) []ID Map() map[ID]*Image Heads() map[ID]*Image + Len() int } // LayerGetReleaser is a minimal interface for getting and releasing images. 
@@ -336,3 +337,9 @@ func (is *store) imagesMap(all bool) map[ID]*Image { } return images } + +func (is *store) Len() int { + is.RLock() + defer is.RUnlock() + return len(is.images) +} diff --git a/vendor/github.com/docker/docker/image/store_test.go b/vendor/github.com/docker/docker/image/store_test.go index ff40f7ea2..d59cde919 100644 --- a/vendor/github.com/docker/docker/image/store_test.go +++ b/vendor/github.com/docker/docker/image/store_test.go @@ -1,13 +1,14 @@ package image // import "github.com/docker/docker/image" import ( + "fmt" "runtime" "testing" - "github.com/docker/docker/internal/testutil" "github.com/docker/docker/layer" + "github.com/gotestyourself/gotestyourself/assert" + "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" ) func TestRestore(t *testing.T) { @@ -15,57 +16,57 @@ func TestRestore(t *testing.T) { defer cleanup() id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - assert.NoError(t, err) + assert.NilError(t, err) _, err = fs.Set([]byte(`invalid`)) - assert.NoError(t, err) + assert.NilError(t, err) id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - assert.NoError(t, err) + assert.NilError(t, err) err = fs.SetMetadata(id2, "parent", []byte(id1)) - assert.NoError(t, err) + assert.NilError(t, err) mlgrMap := make(map[string]LayerGetReleaser) mlgrMap[runtime.GOOS] = &mockLayerGetReleaser{} is, err := NewImageStore(fs, mlgrMap) - assert.NoError(t, err) + assert.NilError(t, err) - assert.Len(t, is.Map(), 2) + assert.Check(t, cmp.Len(is.Map(), 2)) img1, err := is.Get(ID(id1)) - assert.NoError(t, err) - assert.Equal(t, ID(id1), img1.computedID) - assert.Equal(t, string(id1), img1.computedID.String()) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), img1.computedID)) + assert.Check(t, cmp.Equal(string(id1), 
img1.computedID.String())) img2, err := is.Get(ID(id2)) - assert.NoError(t, err) - assert.Equal(t, "abc", img1.Comment) - assert.Equal(t, "def", img2.Comment) + assert.NilError(t, err) + assert.Check(t, cmp.Equal("abc", img1.Comment)) + assert.Check(t, cmp.Equal("def", img2.Comment)) _, err = is.GetParent(ID(id1)) - testutil.ErrorContains(t, err, "failed to read metadata") + assert.ErrorContains(t, err, "failed to read metadata") p, err := is.GetParent(ID(id2)) - assert.NoError(t, err) - assert.Equal(t, ID(id1), p) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), p)) children := is.Children(ID(id1)) - assert.Len(t, children, 1) - assert.Equal(t, ID(id2), children[0]) - assert.Len(t, is.Heads(), 1) + assert.Check(t, cmp.Len(children, 1)) + assert.Check(t, cmp.Equal(ID(id2), children[0])) + assert.Check(t, cmp.Len(is.Heads(), 1)) sid1, err := is.Search(string(id1)[:10]) - assert.NoError(t, err) - assert.Equal(t, ID(id1), sid1) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), sid1)) sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) - assert.NoError(t, err) - assert.Equal(t, ID(id1), sid1) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID(id1), sid1)) invalidPattern := digest.Digest(id1).Hex()[1:6] _, err = is.Search(invalidPattern) - testutil.ErrorContains(t, err, "No such image") + assert.ErrorContains(t, err, "No such image") } func TestAddDelete(t *testing.T) { @@ -73,34 +74,34 @@ func TestAddDelete(t *testing.T) { defer cleanup() id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - assert.NoError(t, err) - assert.Equal(t, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"), id1)) img, err := is.Get(id1) - assert.NoError(t, err) - assert.Equal(t, "abc", 
img.Comment) + assert.NilError(t, err) + assert.Check(t, cmp.Equal("abc", img.Comment)) id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - assert.NoError(t, err) + assert.NilError(t, err) err = is.SetParent(id2, id1) - assert.NoError(t, err) + assert.NilError(t, err) pid1, err := is.GetParent(id2) - assert.NoError(t, err) - assert.Equal(t, pid1, id1) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(pid1, id1)) _, err = is.Delete(id1) - assert.NoError(t, err) + assert.NilError(t, err) _, err = is.Get(id1) - testutil.ErrorContains(t, err, "failed to get digest") + assert.ErrorContains(t, err, "failed to get digest") _, err = is.Get(id2) - assert.NoError(t, err) + assert.NilError(t, err) _, err = is.GetParent(id2) - testutil.ErrorContains(t, err, "failed to read metadata") + assert.ErrorContains(t, err, "failed to read metadata") } func TestSearchAfterDelete(t *testing.T) { @@ -108,17 +109,17 @@ func TestSearchAfterDelete(t *testing.T) { defer cleanup() id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - assert.NoError(t, err) + assert.NilError(t, err) id1, err := is.Search(string(id)[:15]) - assert.NoError(t, err) - assert.Equal(t, id1, id) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(id1, id)) _, err = is.Delete(id) - assert.NoError(t, err) + assert.NilError(t, err) _, err = is.Search(string(id)[:15]) - testutil.ErrorContains(t, err, "No such image") + assert.ErrorContains(t, err, "No such image") } func TestParentReset(t *testing.T) { @@ -126,20 +127,20 @@ func TestParentReset(t *testing.T) { defer cleanup() id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) - assert.NoError(t, err) + assert.NilError(t, err) id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) - assert.NoError(t, err) + assert.NilError(t, err) id3, err := 
is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) - assert.NoError(t, err) + assert.NilError(t, err) - assert.NoError(t, is.SetParent(id, id2)) - assert.Len(t, is.Children(id2), 1) + assert.Check(t, is.SetParent(id, id2)) + assert.Check(t, cmp.Len(is.Children(id2), 1)) - assert.NoError(t, is.SetParent(id, id3)) - assert.Len(t, is.Children(id2), 0) - assert.Len(t, is.Children(id3), 1) + assert.Check(t, is.SetParent(id, id3)) + assert.Check(t, cmp.Len(is.Children(id2), 0)) + assert.Check(t, cmp.Len(is.Children(id3), 1)) } func defaultImageStore(t *testing.T) (Store, func()) { @@ -148,7 +149,7 @@ func defaultImageStore(t *testing.T) (Store, func()) { mlgrMap := make(map[string]LayerGetReleaser) mlgrMap[runtime.GOOS] = &mockLayerGetReleaser{} store, err := NewImageStore(fsBackend, mlgrMap) - assert.NoError(t, err) + assert.NilError(t, err) return store, cleanup } @@ -158,17 +159,31 @@ func TestGetAndSetLastUpdated(t *testing.T) { defer cleanup() id, err := store.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) - assert.NoError(t, err) + assert.NilError(t, err) updated, err := store.GetLastUpdated(id) - assert.NoError(t, err) - assert.Equal(t, updated.IsZero(), true) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(updated.IsZero(), true)) - assert.NoError(t, store.SetLastUpdated(id)) + assert.Check(t, store.SetLastUpdated(id)) updated, err = store.GetLastUpdated(id) - assert.NoError(t, err) - assert.Equal(t, updated.IsZero(), false) + assert.NilError(t, err) + assert.Check(t, cmp.Equal(updated.IsZero(), false)) +} + +func TestStoreLen(t *testing.T) { + store, cleanup := defaultImageStore(t) + defer cleanup() + + expected := 10 + for i := 0; i < expected; i++ { + _, err := store.Create([]byte(fmt.Sprintf(`{"comment": "abc%d", "rootfs": {"type": "layers"}}`, i))) + assert.NilError(t, err) + } + numImages := store.Len() + assert.Equal(t, expected, numImages) + assert.Equal(t, len(store.Map()), numImages) } type 
mockLayerGetReleaser struct{} diff --git a/vendor/github.com/docker/docker/integration-cli/check_test.go b/vendor/github.com/docker/docker/integration-cli/check_test.go index a9f94fb57..18d3d3780 100644 --- a/vendor/github.com/docker/docker/integration-cli/check_test.go +++ b/vendor/github.com/docker/docker/integration-cli/check_test.go @@ -14,7 +14,6 @@ import ( "time" "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/cli/config" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build/fakestorage" @@ -386,74 +385,6 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) { s.ds.TearDownTest(c) } -func init() { - check.Suite(&DockerTrustSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerTrustSuite struct { - ds *DockerSuite - reg *registry.V2 - not *testNotary -} - -func (s *DockerTrustSuite) OnTimeout(c *check.C) { - s.ds.OnTimeout(c) -} - -func (s *DockerTrustSuite) SetUpTest(c *check.C) { - testRequires(c, registry.Hosting, NotaryServerHosting) - s.reg = setupRegistry(c, false, "", "") - s.not = setupNotary(c) -} - -func (s *DockerTrustSuite) TearDownTest(c *check.C) { - if s.reg != nil { - s.reg.Close() - } - if s.not != nil { - s.not.Close() - } - - // Remove trusted keys and metadata after test - os.RemoveAll(filepath.Join(config.Dir(), "trust")) - s.ds.TearDownTest(c) -} - -func init() { - ds := &DockerSuite{} - check.Suite(&DockerTrustedSwarmSuite{ - trustSuite: DockerTrustSuite{ - ds: ds, - }, - swarmSuite: DockerSwarmSuite{ - ds: ds, - }, - }) -} - -type DockerTrustedSwarmSuite struct { - swarmSuite DockerSwarmSuite - trustSuite DockerTrustSuite - reg *registry.V2 - not *testNotary -} - -func (s *DockerTrustedSwarmSuite) SetUpTest(c *check.C) { - s.swarmSuite.SetUpTest(c) - s.trustSuite.SetUpTest(c) -} - -func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { - s.trustSuite.TearDownTest(c) - s.swarmSuite.TearDownTest(c) -} - -func (s 
*DockerTrustedSwarmSuite) OnTimeout(c *check.C) { - s.swarmSuite.OnTimeout(c) -} - func init() { check.Suite(&DockerPluginSuite{ ds: &DockerSuite{}, diff --git a/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go b/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go index eb175365a..b05bfc322 100644 --- a/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go +++ b/vendor/github.com/docker/docker/integration-cli/cli/build/fakegit/fakegit.go @@ -11,11 +11,11 @@ import ( "github.com/docker/docker/integration-cli/cli/build/fakecontext" "github.com/docker/docker/integration-cli/cli/build/fakestorage" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) type testingT interface { - require.TestingT + assert.TestingT logT Fatal(args ...interface{}) Fatalf(string, ...interface{}) diff --git a/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go b/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go index c8c837ed2..bd49a33cf 100644 --- a/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go +++ b/vendor/github.com/docker/docker/integration-cli/cli/build/fakestorage/storage.go @@ -15,13 +15,13 @@ import ( "github.com/docker/docker/integration-cli/request" "github.com/docker/docker/internal/test/environment" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) var testEnv *environment.Execution type testingT interface { - require.TestingT + assert.TestingT logT Fatal(args ...interface{}) Fatalf(string, ...interface{}) diff --git a/vendor/github.com/docker/docker/integration-cli/cli/cli.go b/vendor/github.com/docker/docker/integration-cli/cli/cli.go index eb03b2dd8..17f3fd52c 100644 --- a/vendor/github.com/docker/docker/integration-cli/cli/cli.go +++ 
b/vendor/github.com/docker/docker/integration-cli/cli/cli.go @@ -8,6 +8,7 @@ import ( "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration-cli/environment" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/icmd" "github.com/pkg/errors" ) @@ -24,6 +25,7 @@ func SetTestEnvironment(env *environment.Execution) { type CmdOperator func(*icmd.Cmd) func() type testingT interface { + assert.TestingT Fatal(args ...interface{}) Fatalf(string, ...interface{}) } diff --git a/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go b/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go index 9672d160f..9ca54236f 100644 --- a/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go +++ b/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go @@ -24,14 +24,14 @@ import ( "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/go-check/check" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/icmd" "github.com/pkg/errors" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) type testingT interface { - require.TestingT + assert.TestingT logT Fatalf(string, ...interface{}) } @@ -487,20 +487,20 @@ func (d *Daemon) handleUserns() { // LoadBusybox image into the daemon func (d *Daemon) LoadBusybox(t testingT) { clientHost, err := client.NewEnvClient() - require.NoError(t, err, "failed to create client") + assert.NilError(t, err, "failed to create client") defer clientHost.Close() ctx := context.Background() reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) - require.NoError(t, err, "failed to download busybox") + assert.NilError(t, err, "failed to download busybox") defer reader.Close() client, err := d.NewClient() - require.NoError(t, err, "failed to create client") + assert.NilError(t, err, "failed to create client") defer client.Close() resp, err := 
client.ImageLoad(ctx, reader, true) - require.NoError(t, err, "failed to load busybox") + assert.NilError(t, err, "failed to load busybox") defer resp.Body.Close() } @@ -563,11 +563,11 @@ func (d *Daemon) WaitRun(contID string) error { } // Info returns the info struct for this daemon -func (d *Daemon) Info(t require.TestingT) types.Info { +func (d *Daemon) Info(t assert.TestingT) types.Info { apiclient, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) info, err := apiclient.Info(context.Background()) - require.NoError(t, err) + assert.NilError(t, err) return info } diff --git a/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go b/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go index cb44f63f2..be0ddef99 100644 --- a/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go +++ b/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go @@ -11,8 +11,8 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" + "github.com/gotestyourself/gotestyourself/assert" "github.com/pkg/errors" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -235,12 +235,12 @@ func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interfac func (d *Swarm) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { apiclient, err := d.NewClient() - require.NoError(c, err) + assert.NilError(c, err) resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin) if client.IsErrNotFound(err) { return false, check.Commentf("%v", err) } - require.NoError(c, err) + assert.NilError(c, err) return resp.Enabled, check.Commentf("%+v", resp) } } @@ -249,12 +249,12 @@ func (d *Swarm) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, func (d *Swarm) 
CheckPluginImage(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { return func(c *check.C) (interface{}, check.CommentInterface) { apiclient, err := d.NewClient() - require.NoError(c, err) + assert.NilError(c, err) resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin) if client.IsErrNotFound(err) { return false, check.Commentf("%v", err) } - require.NoError(c, err) + assert.NilError(c, err) return resp.PluginReference, check.Commentf("%+v", resp) } } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go index e5423a474..cae7c1afe 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_build_test.go @@ -18,10 +18,10 @@ import ( "github.com/docker/docker/integration-cli/cli/build/fakestorage" "github.com/docker/docker/integration-cli/request" "github.com/go-check/check" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) @@ -296,12 +296,12 @@ func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) { "/build", request.RawContent(ctx.AsTarReader(c)), request.ContentType("application/x-tar")) - require.NoError(c, err) - assert.Equal(c, http.StatusOK, res.StatusCode) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) out, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(out), "Successfully built") + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) return out } @@ -316,15 +316,15 @@ func (s *DockerSuite) TestBuildOnBuildCache(c *check.C) { out := 
build(dockerfile) imageIDs := getImageIDsFromBuild(c, out) - assert.Len(c, imageIDs, 2) + assert.Check(c, is.Len(imageIDs, 2)) parentID, childID := imageIDs[0], imageIDs[1] client := testEnv.APIClient() // check parentID is correct image, _, err := client.ImageInspectWithRaw(context.Background(), childID) - require.NoError(c, err) - assert.Equal(c, parentID, image.Parent) + assert.NilError(c, err) + assert.Check(c, is.Equal(parentID, image.Parent)) } func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { @@ -333,12 +333,12 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry err := client.ImageTag(context.TODO(), "busybox", repoName) - assert.Nil(c, err) + assert.Check(c, err) // push the image to the registry rc, err := client.ImagePush(context.TODO(), repoName, types.ImagePushOptions{RegistryAuth: "{}"}) - assert.Nil(c, err) + assert.Check(c, err) _, err = io.Copy(ioutil.Discard, rc) - assert.Nil(c, err) + assert.Check(c, err) dockerfile := fmt.Sprintf(` FROM %s AS foo @@ -356,12 +356,12 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) { "/build?pull=1", request.RawContent(ctx.AsTarReader(c)), request.ContentType("application/x-tar")) - require.NoError(c, err) - assert.Equal(c, http.StatusOK, res.StatusCode) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) out, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(out), "Successfully built") + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) } func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { @@ -374,11 +374,11 @@ func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { Mode: 0600, Typeflag: tar.TypeReg, }) - require.NoError(c, err) + assert.NilError(c, err) _, err = tw.Write(dt) - 
require.NoError(c, err) + assert.NilError(c, err) err = tw.Close() - require.NoError(c, err) + assert.NilError(c, err) server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{ "test.tar": buffer, @@ -400,12 +400,12 @@ func (s *DockerSuite) TestBuildAddRemoteNoDecompress(c *check.C) { "/build", request.RawContent(ctx.AsTarReader(c)), request.ContentType("application/x-tar")) - require.NoError(c, err) - assert.Equal(c, http.StatusOK, res.StatusCode) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) out, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(out), "Successfully built") + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) } func (s *DockerSuite) TestBuildChownOnCopy(c *check.C) { @@ -433,8 +433,8 @@ func (s *DockerSuite) TestBuildChownOnCopy(c *check.C) { c.Assert(res.StatusCode, checker.Equals, http.StatusOK) out, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(out), "Successfully built") + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) } func (s *DockerSuite) TestBuildCopyCacheOnFileChange(c *check.C) { @@ -454,11 +454,11 @@ COPY file /file` request.RawContent(ctx.AsTarReader(c)), request.ContentType("application/x-tar")) - require.NoError(c, err) - assert.Equal(c, http.StatusOK, res.StatusCode) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) out, err := request.ReadBody(body) - require.NoError(c, err) + assert.NilError(c, err) ids := getImageIDsFromBuild(c, out) return ids[len(ids)-1] @@ -493,11 +493,11 @@ ADD file /file` request.RawContent(ctx.AsTarReader(c)), request.ContentType("application/x-tar")) - require.NoError(c, err) - assert.Equal(c, http.StatusOK, res.StatusCode) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) out, err := request.ReadBody(body) 
- require.NoError(c, err) + assert.NilError(c, err) ids := getImageIDsFromBuild(c, out) return ids[len(ids)-1] @@ -530,7 +530,7 @@ func (s *DockerSuite) TestBuildWithSession(c *check.C) { defer fctx.Close() out := testBuildWithSession(c, fctx.Dir, dockerfile) - assert.Contains(c, out, "some content") + assert.Check(c, is.Contains(out, "some content")) fctx.Add("second", "contentcontent") @@ -540,20 +540,20 @@ func (s *DockerSuite) TestBuildWithSession(c *check.C) { ` out = testBuildWithSession(c, fctx.Dir, dockerfile) - assert.Equal(c, strings.Count(out, "Using cache"), 2) - assert.Contains(c, out, "contentcontent") + assert.Check(c, is.Equal(strings.Count(out, "Using cache"), 2)) + assert.Check(c, is.Contains(out, "contentcontent")) client := testEnv.APIClient() du, err := client.DiskUsage(context.TODO()) - assert.Nil(c, err) - assert.True(c, du.BuilderSize > 10) + assert.Check(c, err) + assert.Check(c, du.BuilderSize > 10) out = testBuildWithSession(c, fctx.Dir, dockerfile) - assert.Equal(c, strings.Count(out, "Using cache"), 4) + assert.Check(c, is.Equal(strings.Count(out, "Using cache"), 4)) du2, err := client.DiskUsage(context.TODO()) - assert.Nil(c, err) - assert.Equal(c, du.BuilderSize, du2.BuilderSize) + assert.Check(c, err) + assert.Check(c, is.Equal(du.BuilderSize, du2.BuilderSize)) // rebuild with regular tar, confirm cache still applies fctx.Add("Dockerfile", dockerfile) @@ -561,26 +561,26 @@ func (s *DockerSuite) TestBuildWithSession(c *check.C) { "/build", request.RawContent(fctx.AsTarReader(c)), request.ContentType("application/x-tar")) - require.NoError(c, err) - assert.Equal(c, http.StatusOK, res.StatusCode) + assert.NilError(c, err) + assert.Check(c, is.DeepEqual(http.StatusOK, res.StatusCode)) outBytes, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(outBytes), "Successfully built") - assert.Equal(c, strings.Count(string(outBytes), "Using cache"), 4) + assert.NilError(c, err) + assert.Check(c, 
is.Contains(string(outBytes), "Successfully built")) + assert.Check(c, is.Equal(strings.Count(string(outBytes), "Using cache"), 4)) _, err = client.BuildCachePrune(context.TODO()) - assert.Nil(c, err) + assert.Check(c, err) du, err = client.DiskUsage(context.TODO()) - assert.Nil(c, err) - assert.Equal(c, du.BuilderSize, int64(0)) + assert.Check(c, err) + assert.Check(c, is.Equal(du.BuilderSize, int64(0))) } func testBuildWithSession(c *check.C, dir, dockerfile string) (outStr string) { client := testEnv.APIClient() sess, err := session.NewSession("foo1", "foo") - assert.Nil(c, err) + assert.Check(c, err) fsProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{ {Dir: dir}, @@ -601,17 +601,17 @@ func testBuildWithSession(c *check.C, dir, dockerfile string) (outStr string) { if err != nil { return err } - assert.Equal(c, res.StatusCode, http.StatusOK) + assert.Check(c, is.DeepEqual(res.StatusCode, http.StatusOK)) out, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(out), "Successfully built") + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) sess.Close() outStr = string(out) return nil }) err = g.Wait() - assert.Nil(c, err) + assert.Check(c, err) return } @@ -633,8 +633,8 @@ ENV foo bar` c.Assert(res.StatusCode, checker.Equals, http.StatusOK) out, err := request.ReadBody(body) - require.NoError(c, err) - assert.Contains(c, string(out), "Successfully built") + assert.NilError(c, err) + assert.Check(c, is.Contains(string(out), "Successfully built")) } type buildLine struct { @@ -651,7 +651,7 @@ func getImageIDsFromBuild(c *check.C, output []byte) []string { continue } entry := buildLine{} - require.NoError(c, json.Unmarshal(line, &entry)) + assert.NilError(c, json.Unmarshal(line, &entry)) if entry.Aux.ID != "" { ids = append(ids, entry.Aux.ID) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go 
b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go index ed1941022..c0e5ff5b1 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_test.go @@ -31,9 +31,9 @@ import ( "github.com/docker/docker/volume" "github.com/docker/go-connections/nat" "github.com/go-check/check" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -2027,47 +2027,47 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.spec}}, &networktypes.NetworkingConfig{}, "") - require.NoError(c, err) + assert.NilError(c, err) containerInspect, err := apiclient.ContainerInspect(ctx, container.ID) - require.NoError(c, err) + assert.NilError(c, err) mps := containerInspect.Mounts - require.Len(c, mps, 1) + assert.Assert(c, is.Len(mps, 1)) mountPoint := mps[0] if x.expected.Source != "" { - assert.Equal(c, x.expected.Source, mountPoint.Source) + assert.Check(c, is.Equal(x.expected.Source, mountPoint.Source)) } if x.expected.Name != "" { - assert.Equal(c, x.expected.Name, mountPoint.Name) + assert.Check(c, is.Equal(x.expected.Name, mountPoint.Name)) } if x.expected.Driver != "" { - assert.Equal(c, x.expected.Driver, mountPoint.Driver) + assert.Check(c, is.Equal(x.expected.Driver, mountPoint.Driver)) } if x.expected.Propagation != "" { - assert.Equal(c, x.expected.Propagation, mountPoint.Propagation) + assert.Check(c, is.Equal(x.expected.Propagation, mountPoint.Propagation)) } - assert.Equal(c, x.expected.RW, mountPoint.RW) - assert.Equal(c, x.expected.Type, mountPoint.Type) - assert.Equal(c, x.expected.Mode, mountPoint.Mode) - assert.Equal(c, x.expected.Destination, 
mountPoint.Destination) + assert.Check(c, is.Equal(x.expected.RW, mountPoint.RW)) + assert.Check(c, is.Equal(x.expected.Type, mountPoint.Type)) + assert.Check(c, is.Equal(x.expected.Mode, mountPoint.Mode)) + assert.Check(c, is.Equal(x.expected.Destination, mountPoint.Destination)) err = apiclient.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) - require.NoError(c, err) + assert.NilError(c, err) poll.WaitOn(c, containerExit(apiclient, container.ID), poll.WithDelay(time.Second)) err = apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ RemoveVolumes: true, Force: true, }) - require.NoError(c, err) + assert.NilError(c, err) switch { // Named volumes still exist after the container is removed case x.spec.Type == "volume" && len(x.spec.Source) > 0: _, err := apiclient.VolumeInspect(ctx, mountPoint.Name) - require.NoError(c, err) + assert.NilError(c, err) // Bind mounts are never removed with the container case x.spec.Type == "bind": @@ -2075,7 +2075,7 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { // anonymous volumes are removed default: _, err := apiclient.VolumeInspect(ctx, mountPoint.Name) - assert.True(c, client.IsErrNotFound(err)) + assert.Check(c, client.IsErrNotFound(err)) } } } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go index eb2892575..4c8ace484 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_containers_windows_test.go @@ -13,8 +13,8 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" "github.com/go-check/check" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -65,12 +65,12 @@ func (s *DockerSuite) TestContainersAPICreateMountsBindNamedPipe(c *check.C) { }, }, nil, name) - require.NoError(c, err) + assert.NilError(c, err) err = client.ContainerStart(ctx, name, types.ContainerStartOptions{}) - require.NoError(c, err) + assert.NilError(c, err) err = <-ch - require.NoError(c, err) - assert.Equal(c, text, strings.TrimSpace(string(b))) + assert.NilError(c, err) + assert.Check(c, is.Equal(text, strings.TrimSpace(string(b)))) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go index 52a889f08..2f81d6e1e 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_api_inspect_test.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/go-check/check" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func (s *DockerSuite) TestInspectAPIContainerResponse(c *check.C) { @@ -115,8 +116,8 @@ func (s *DockerSuite) TestInspectAPIImageResponse(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(imageJSON.RepoTags, checker.HasLen, 2) - assert.Contains(c, imageJSON.RepoTags, "busybox:latest") - assert.Contains(c, imageJSON.RepoTags, "busybox:mytag") + assert.Check(c, is.Contains(imageJSON.RepoTags, "busybox:latest")) + assert.Check(c, is.Contains(imageJSON.RepoTags, "busybox:mytag")) } // #17131, #17139, #17173 diff --git a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go index b5edfd2c0..2ba69acdb 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go +++ 
b/vendor/github.com/docker/docker/integration-cli/docker_api_swarm_test.go @@ -25,8 +25,8 @@ import ( "github.com/docker/docker/integration-cli/request" "github.com/docker/swarmkit/ca" "github.com/go-check/check" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -1012,16 +1012,16 @@ func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) { name := "test-scoped-network" ctx := context.Background() apiclient, err := d.NewClient() - require.NoError(c, err) + assert.NilError(c, err) resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"}) - require.NoError(c, err) + assert.NilError(c, err) network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{}) - require.NoError(c, err) - assert.Equal(c, "swarm", network.Scope) - assert.Equal(c, resp.ID, network.ID) + assert.NilError(c, err) + assert.Check(c, is.Equal("swarm", network.Scope)) + assert.Check(c, is.Equal(resp.ID, network.ID)) _, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"}) - assert.True(c, client.IsErrNotFound(err)) + assert.Check(c, client.IsErrNotFound(err)) } diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go index fc2dafe41..65c63abdb 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_build_test.go @@ -4048,140 +4048,6 @@ func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { }) } -func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-build") - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, repoName) - - name := "testtrustedbuild" - - buildImage(name, trustedBuild, 
build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ - Out: fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7]), - }) - - // We should also have a tag reference for the image. - dockerCmd(c, "inspect", repoName) - - // We should now be able to remove the tag reference. - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, repoName) - - name := "testtrustedbuilduntrustedtag" - - buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "does not have trust data for", - }) -} - -// FIXME(vdemeester) should migrate to docker/cli e2e tests -func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { - testRequires(c, DaemonIsLinux) - tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") - c.Assert(err, check.IsNil) - defer os.RemoveAll(tempDir) - - // Make a real context directory in this temp directory with a simple - // Dockerfile. - realContextDirname := filepath.Join(tempDir, "context") - if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { - c.Fatal(err) - } - - if err = ioutil.WriteFile( - filepath.Join(realContextDirname, "Dockerfile"), - []byte(` - FROM busybox - RUN echo hello world - `), - os.FileMode(0644), - ); err != nil { - c.Fatal(err) - } - - // Make a symlink to the real context directory. - contextSymlinkName := filepath.Join(tempDir, "context_link") - if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { - c.Fatal(err) - } - - // Executing the build with the symlink as the specified context should - // *not* fail. 
- dockerCmd(c, "build", contextSymlinkName) -} - -func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { - testRequires(c, NotaryHosting) - - latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") - repoName := strings.TrimSuffix(latestTag, ":latest") - - // Now create the releases role - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // push a different tag to the releases role - otherTag := fmt.Sprintf("%s:other", repoName) - cli.DockerCmd(c, "tag", "busybox", otherTag) - - cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) - s.assertTargetInRoles(c, repoName, "other", "targets/releases") - s.assertTargetNotInRoles(c, repoName, "other", "targets") - - cli.DockerCmd(c, "rmi", otherTag) - - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, otherTag) - name := "testtrustedbuildreleasesrole" - cli.BuildCmd(c, name, trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ - Out: fmt.Sprintf("FROM %s@sha", repoName), - }) -} - -func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { - testRequires(c, NotaryHosting) - - latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") - repoName := strings.TrimSuffix(latestTag, ":latest") - - // Now create a non-releases delegation role - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // push a different tag to the other role - otherTag := fmt.Sprintf("%s:other", repoName) - cli.DockerCmd(c, "tag", "busybox", otherTag) - - cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success) - s.assertTargetInRoles(c, repoName, "other", "targets/other") - s.assertTargetNotInRoles(c, repoName, "other", "targets") - - cli.DockerCmd(c, 
"rmi", otherTag) - - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, otherTag) - - name := "testtrustedbuildotherrole" - cli.Docker(cli.Build(name), trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{ - ExitCode: 1, - }) -} - // Issue #15634: COPY fails when path starts with "null" func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { name := "testbuildnullstringinaddcopyvolume" @@ -6018,28 +5884,6 @@ func (s *DockerSuite) TestBuildMultiStageNameVariants(c *check.C) { cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"}) } -func (s *DockerTrustSuite) TestBuildMultiStageTrusted(c *check.C) { - img1 := s.setupTrustedImage(c, "trusted-build1") - img2 := s.setupTrustedImage(c, "trusted-build2") - dockerFile := fmt.Sprintf(` - FROM %s AS build-base - RUN echo ok > /foo - FROM %s - COPY --from=build-base foo bar`, img1, img2) - - name := "testcopyfromtrustedbuild" - - r := buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)) - r.Assert(c, icmd.Expected{ - Out: fmt.Sprintf("FROM %s@sha", img1[:len(img1)-7]), - }) - r.Assert(c, icmd.Expected{ - Out: fmt.Sprintf("FROM %s@sha", img2[:len(img2)-7]), - }) - - dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"}) -} - func (s *DockerSuite) TestBuildMultiStageMultipleBuildsWindows(c *check.C) { testRequires(c, DaemonIsWindows) dockerfile := ` diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go index 2d5dd486e..ac97e0aec 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_by_digest_test.go @@ -15,8 +15,9 @@ import ( "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" ) var ( @@ -403,7 +404,7 @@ func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(imageJSON, checker.HasLen, 1) c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) - assert.Contains(c, imageJSON[0].RepoDigests, imageReference) + assert.Check(c, is.Contains(imageJSON[0].RepoDigests, imageReference)) } func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go index 448af9f19..120b62bc0 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_create_test.go @@ -3,7 +3,6 @@ package main import ( "encoding/json" "fmt" - "io/ioutil" "os" "reflect" "strings" @@ -16,7 +15,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/go-connections/nat" "github.com/go-check/check" - "github.com/gotestyourself/gotestyourself/icmd" ) // Make sure we can create a simple container with some args @@ -292,75 +290,6 @@ func (s *DockerSuite) TestCreateByImageID(c *check.C) { } } -func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-create") - - // Try create - cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) - cli.DockerCmd(c, "rmi", repoName) - - // Try untrusted create to ensure we pushed the tag to the registry - cli.Docker(cli.Args("create", "--disable-content-trust=true", repoName)).Assert(c, SuccessDownloadedOnStderr) -} - -func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) - withTagName := fmt.Sprintf("%s:latest", 
repoName) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", withTagName) - cli.DockerCmd(c, "push", withTagName) - cli.DockerCmd(c, "rmi", withTagName) - - // Try trusted create on untrusted tag - cli.Docker(cli.Args("create", withTagName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: fmt.Sprintf("does not have trust data for %s", repoName), - }) -} - -func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-isolated-create") - - // Try create - cli.Docker(cli.Args("--config", "/tmp/docker-isolated-create", "create", repoName), trustedCmd).Assert(c, SuccessTagging) - defer os.RemoveAll("/tmp/docker-isolated-create") - - cli.DockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") - c.Assert(err, check.IsNil) - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - cli.DockerCmd(c, "rmi", repoName) - - // Try create - cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, SuccessTagging) - cli.DockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - c.Assert(err, check.IsNil) - - // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. 
- // tag an image and upload it to the private registry - cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Now, try creating with the original client from this new trust server. This should fail because the new root is invalid. - cli.Docker(cli.Args("create", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "could not rotate trust to a new trusted root", - }) -} - func (s *DockerSuite) TestCreateStopSignal(c *check.C) { name := "test_create_stop_signal" dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go index 8ca725444..8a936e3e4 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_plugins_test.go @@ -16,7 +16,6 @@ import ( "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration-cli/fixtures/plugin" "github.com/go-check/check" - "github.com/gotestyourself/gotestyourself/icmd" "golang.org/x/net/context" ) @@ -352,51 +351,6 @@ func (s *DockerSuite) TestPluginInspectOnWindows(c *check.C) { c.Assert(err.Error(), checker.Contains, "plugins are not supported on this platform") } -func (s *DockerTrustSuite) TestPluginTrustedInstall(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - - trustedName := s.setupTrustedplugin(c, pNameWithTag, "trusted-plugin-install") - - cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, icmd.Expected{ - Out: trustedName, - }) - - out := cli.DockerCmd(c, "plugin", "ls").Combined() - c.Assert(out, checker.Contains, "true") - - out = cli.DockerCmd(c, "plugin", 
"disable", trustedName).Combined() - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - - out = cli.DockerCmd(c, "plugin", "enable", trustedName).Combined() - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - - out = cli.DockerCmd(c, "plugin", "rm", "-f", trustedName).Combined() - c.Assert(strings.TrimSpace(out), checker.Contains, trustedName) - - // Try untrusted pull to ensure we pushed the tag to the registry - cli.Docker(cli.Args("plugin", "install", "--disable-content-trust=true", "--grant-all-permissions", trustedName), trustedCmd).Assert(c, SuccessDownloaded) - - out = cli.DockerCmd(c, "plugin", "ls").Combined() - c.Assert(out, checker.Contains, "true") - -} - -func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - - pluginName := fmt.Sprintf("%v/dockercliuntrusted/plugintest:latest", privateRegistryURL) - // install locally and push to private registry - cli.DockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", pluginName, pNameWithTag) - cli.DockerCmd(c, "plugin", "push", pluginName) - cli.DockerCmd(c, "plugin", "rm", "-f", pluginName) - - // Try trusted install on untrusted plugin - cli.Docker(cli.Args("plugin", "install", "--grant-all-permissions", pluginName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "Error: remote trust data does not exist", - }) -} - func (ps *DockerPluginSuite) TestPluginIDPrefix(c *check.C) { name := "test" client := testEnv.APIClient() diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go deleted file mode 100644 index 60e1c3db1..000000000 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_pull_trusted_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - - "github.com/docker/docker/integration-cli/checker" - 
"github.com/docker/docker/integration-cli/cli" - "github.com/docker/docker/integration-cli/cli/build" - "github.com/go-check/check" - "github.com/gotestyourself/gotestyourself/icmd" -) - -func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-pull") - - // Try pull - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) - - cli.DockerCmd(c, "rmi", repoName) - // Try untrusted pull to ensure we pushed the tag to the registry - cli.Docker(cli.Args("pull", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloaded) -} - -func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-isolated-pull") - - // Try pull (run from isolated directory without trust information) - cli.Docker(cli.Args("--config", "/tmp/docker-isolated", "pull", repoName), trustedCmd).Assert(c, SuccessTagging) - - cli.DockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.DockerCmd(c, "push", repoName) - cli.DockerCmd(c, "rmi", repoName) - - // Try trusted pull on untrusted tag - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "Error: remote trust data does not exist", - }) -} - -func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") - if err != nil { - c.Fatalf("Failed to create local temp dir") - } - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - 
cli.DockerCmd(c, "rmi", repoName) - - // Try pull - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) - cli.DockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - - c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) - - // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. - // tag an image and upload it to the private registry - cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Now, try pulling with the original client from this new trust server. This should fail because the new root is invalid. - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "could not rotate trust to a new trusted root", - }) -} - -func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-offline-pull") - - cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "error contacting notary server", - }) - // Do valid trusted pull to warm cache - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessTagging) - cli.DockerCmd(c, "rmi", repoName) - - // Try pull again with invalid notary server, should use cache - cli.Docker(cli.Args("pull", repoName), trustedCmdWithServer("https://invalidnotaryserver")).Assert(c, SuccessTagging) -} - -func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") - // tag the image and upload it to the private registry - cli.BuildCmd(c, repoName, build.WithDockerfile(` - FROM busybox - CMD echo 
trustedpulldelete - `)) - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - cli.DockerCmd(c, "rmi", repoName) - - // Try pull - result := cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Success) - - matches := digestRegex.FindStringSubmatch(result.Combined()) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", result.Combined())) - pullDigest := matches[1] - - imageID := inspectField(c, repoName, "Id") - - imageByDigest := repoName + "@" + pullDigest - byDigestID := inspectField(c, imageByDigest, "Id") - - c.Assert(byDigestID, checker.Equals, imageID) - - // rmi of tag should also remove the digest reference - cli.DockerCmd(c, "rmi", repoName) - - _, err := inspectFieldWithError(imageByDigest, "Id") - c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) - - _, err = inspectFieldWithError(imageID, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) -} - -func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - - // Push with targets first, initializing the repo - cli.DockerCmd(c, "tag", "busybox", targetName) - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) - s.assertTargetInRoles(c, repoName, "latest", "targets") - - // Try pull, check we retrieve from targets role - cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Err: "retrieving target for targets role", - }) - - // Now we'll create the releases role, and try pushing and pulling - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // 
try a pull, check that we can still pull because we can still read the - // old tag in the targets role - cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Err: "retrieving target for targets role", - }) - - // try a pull -a, check that it succeeds because we can still pull from the - // targets role - cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Success) - - // Push, should sign with targets/releases - cli.DockerCmd(c, "tag", "busybox", targetName) - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) - s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") - - // Try pull, check we retrieve from targets/releases role - cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Err: "retrieving target for targets/releases role", - }) - - // Create another delegation that we'll sign with - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) - s.notaryPublish(c, repoName) - - cli.DockerCmd(c, "tag", "busybox", targetName) - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) - s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") - - // Try pull, check we retrieve from targets/releases role - cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Err: "retrieving target for targets/releases role", - }) -} - -func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - - // We'll create a repo first with a non-release delegation role, so that when we - // push we'll sign it into the delegation role - s.notaryInitRepo(c, repoName) - 
s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // Push should write to the delegation role, not targets - cli.DockerCmd(c, "tag", "busybox", targetName) - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Success) - s.assertTargetInRoles(c, repoName, "latest", "targets/other") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull - we should fail, since pull will only pull from the targets/releases - // role or the targets role - cli.DockerCmd(c, "tag", "busybox", targetName) - cli.Docker(cli.Args("-D", "pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "No trust data for", - }) - - // try a pull -a: we should fail since pull will only pull from the targets/releases - // role or the targets role - cli.Docker(cli.Args("-D", "pull", "-a", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "No trusted tags for", - }) -} diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go index 94efa08ea..48d6be2ac 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_push_test.go @@ -7,14 +7,11 @@ import ( "net/http" "net/http/httptest" "os" - "path/filepath" "strings" "sync" "github.com/docker/distribution/reference" - "github.com/docker/docker/cli/config" "github.com/docker/docker/integration-cli/checker" - "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" "github.com/go-check/check" "github.com/gotestyourself/gotestyourself/icmd" @@ -281,225 +278,6 @@ func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c c.Assert(out3, check.Equals, "hello world") } -func (s *DockerTrustSuite) 
TestTrustedPush(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Try pull after push - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Out: "Status: Image is up to date", - }) - - // Assert that we rotated the snapshot key to the server by checking our local keystore - contents, err := ioutil.ReadDir(filepath.Join(config.Dir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) - c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) - // Check that we only have 1 key (targets key) - c.Assert(contents, checker.HasLen, 1) -} - -func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "12345678")).Assert(c, SuccessSigningAndPushing) - - // Try pull after push - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Out: "Status: Image is up to date", - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - // Using a name that doesn't resolve to an address makes this test faster - cli.Docker(cli.Args("push", repoName), trustedCmdWithServer("https://server.invalid:81/")).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "error contacting notary server", - }) -} - -func (s *DockerTrustSuite) 
TestTrustedPushWithoutServerAndUntrusted(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - result := cli.Docker(cli.Args("push", "--disable-content-trust", repoName), trustedCmdWithServer("https://server.invalid:81/")) - result.Assert(c, icmd.Success) - c.Assert(result.Combined(), check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.DockerCmd(c, "push", repoName) - - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Try pull after push - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, icmd.Expected{ - Out: "Status: Image is up to date", - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - // Do a trusted push - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Do another trusted push - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - cli.DockerCmd(c, "rmi", repoName) - - // Try pull to ensure the double push did not break our ability to pull - cli.Docker(cli.Args("pull", repoName), trustedCmd).Assert(c, SuccessDownloaded) -} - -func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { - repoName := 
fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Push with wrong passphrases - cli.Docker(cli.Args("push", repoName), trustedCmdWithPassphrases("12345678", "87654321")).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "could not find necessary signing keys", - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryPublish(c, repoName) - - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", targetName) - - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) - // check to make sure that the target has been added to targets/releases and not targets - s.assertTargetInRoles(c, repoName, "latest", "targets/releases") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(config.Dir(), "trust")) - - cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ - Out: "Status: Image is up to date", - }) -} - -func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", 
s.not.keys[0].Public) - s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public) - s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) - - // import everything except the third key - s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) - s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) - - s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) - s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) - - s.notaryPublish(c, repoName) - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", targetName) - - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // check to make sure that the target has been added to targets/role1 and targets/role2, and - // not targets (because there are delegations) or targets/role3 (due to missing key) or - // targets/role1/subrole (due to it being a second level delegation) - s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(config.Dir(), "trust")) - - // pull should fail because none of these are the releases role - cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - }) -} - -func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") - s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") - s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") - s.notaryCreateDelegation(c, repoName, 
"targets/role4", s.not.keys[3].Public, "latest") - - // import everything except the third key - s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) - s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) - s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) - - s.notaryPublish(c, repoName) - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", targetName) - - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // check to make sure that the target has been added to targets/role1 and targets/role4, and - // not targets (because there are delegations) or targets/role2 (due to path restrictions) or - // targets/role3 (due to missing key) - s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(config.Dir(), "trust")) - - // pull should fail because none of these are the releases role - cli.Docker(cli.Args("pull", targetName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - }) -} - -func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) - s.notaryPublish(c, repoName) - - // do not import any delegations key - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", targetName) - - cli.Docker(cli.Args("push", targetName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "no valid signing keys", - }) - s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") -} - func (s 
*DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) dockerCmd(c, "tag", "busybox", repoName) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go index 871fee7d0..a4984862e 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_run_test.go @@ -3140,75 +3140,6 @@ func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { } } -func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := s.setupTrustedImage(c, "trusted-run") - - // Try run - cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) - cli.DockerCmd(c, "rmi", repoName) - - // Try untrusted run to ensure we pushed the tag to the registry - cli.Docker(cli.Args("run", "--disable-content-trust=true", repoName), trustedCmd).Assert(c, SuccessDownloadedOnStderr) -} - -func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.DockerCmd(c, "push", repoName) - cli.DockerCmd(c, "rmi", repoName) - - // Try trusted run on untrusted tag - cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 125, - Err: "does not have trust data for", - }) -} - -func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := 
ioutil.TempDir("", "evilrun-local-config-dir") - if err != nil { - c.Fatalf("Failed to create local temp dir") - } - - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - cli.DockerCmd(c, "rmi", repoName) - - // Try run - cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, SuccessTagging) - cli.DockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - if err != nil { - c.Fatalf("Restarting notary server failed.") - } - - // In order to make an evil server, lets re-init a client (with a different trust dir) and push new data. - // tag an image and upload it to the private registry - cli.DockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - cli.Docker(cli.Args("--config", evilLocalConfigDir, "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - // Now, try running with the original client from this new trust server. This should fail because the new root is invalid. 
- cli.Docker(cli.Args("run", repoName), trustedCmd).Assert(c, icmd.Expected{ - ExitCode: 125, - Err: "could not rotate trust to a new trusted root", - }) -} - func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { // Not applicable on Windows as uses Unix specific functionality testRequires(c, DaemonIsLinux, SameHostDaemon) @@ -4357,7 +4288,7 @@ func (s *DockerSuite) TestSlowStdinClosing(c *check.C) { }() select { - case <-time.After(15 * time.Second): + case <-time.After(30 * time.Second): c.Fatal("running container timed out") // cleanup in teardown case err := <-done: c.Assert(err, checker.IsNil) diff --git a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go index 8add18e1e..4b3358255 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_cli_swarm_test.go @@ -1560,78 +1560,6 @@ func (s *DockerSwarmSuite) TestSwarmNetworkIPAMOptions(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Contains, "com.docker.network.ipam.serial:true") } -func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) { - d := s.swarmSuite.AddDaemon(c, true, true) - - // Attempt creating a service from an image that is known to notary. - repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") - - name := "trusted" - cli.Docker(cli.Args("-D", "service", "create", "--detach", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ - Err: "resolved image tag to", - }) - - out, err := d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, repoName+"@", check.Commentf(out)) - - // Try trusted service create on an untrusted tag. 
- - repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.DockerCmd(c, "push", repoName) - cli.DockerCmd(c, "rmi", repoName) - - name = "untrusted" - cli.Docker(cli.Args("service", "create", "--detach", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "Error: remote trust data does not exist", - }) - - out, err = d.Cmd("service", "inspect", "--pretty", name) - c.Assert(err, checker.NotNil, check.Commentf(out)) -} - -func (s *DockerTrustedSwarmSuite) TestTrustedServiceUpdate(c *check.C) { - d := s.swarmSuite.AddDaemon(c, true, true) - - // Attempt creating a service from an image that is known to notary. - repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull") - - name := "myservice" - - // Create a service without content trust - cli.Docker(cli.Args("service", "create", "--detach", "--no-resolve-image", "--name", name, repoName, "top"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success) - - result := cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)) - c.Assert(result.Error, checker.IsNil, check.Commentf(result.Combined())) - // Daemon won't insert the digest because this is disabled by - // DOCKER_SERVICE_PREFER_OFFLINE_IMAGE. - c.Assert(result.Combined(), check.Not(checker.Contains), repoName+"@", check.Commentf(result.Combined())) - - cli.Docker(cli.Args("-D", "service", "update", "--detach", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ - Err: "resolved image tag to", - }) - - cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ - Out: repoName + "@", - }) - - // Try trusted service update on an untrusted tag. 
- - repoName = fmt.Sprintf("%v/untrustedservicecreate/createtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.DockerCmd(c, "push", repoName) - cli.DockerCmd(c, "rmi", repoName) - - cli.Docker(cli.Args("service", "update", "--detach", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{ - ExitCode: 1, - Err: "Error: remote trust data does not exist", - }) -} - // Test case for issue #27866, which did not allow NW name that is the prefix of a swarm NW ID. // e.g. if the ingress ID starts with "n1", it was impossible to create a NW named "n1". func (s *DockerSwarmSuite) TestSwarmNetworkCreateIssue27866(c *check.C) { diff --git a/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go b/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go index ea780cc6e..147555823 100644 --- a/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go +++ b/vendor/github.com/docker/docker/integration-cli/docker_utils_test.go @@ -202,12 +202,6 @@ func buildImage(name string, cmdOperators ...cli.CmdOperator) *icmd.Result { return cli.Docker(cli.Build(name), cmdOperators...) } -// Deprecated: use trustedcmd -func trustedBuild(cmd *icmd.Cmd) func() { - trustedCmd(cmd) - return nil -} - // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. // The file is truncated if it already exists. @@ -306,13 +300,6 @@ func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *registry.V2 return reg } -func setupNotary(c *check.C) *testNotary { - ts, err := newTestNotary(c) - c.Assert(err, check.IsNil) - - return ts -} - // appendBaseEnv appends the minimum set of environment variables to exec the // docker cli binary for testing with correct configuration to the given env // list. 
diff --git a/vendor/github.com/docker/docker/integration-cli/requirements_test.go b/vendor/github.com/docker/docker/integration-cli/requirements_test.go index 838977e70..b0a95d42f 100644 --- a/vendor/github.com/docker/docker/integration-cli/requirements_test.go +++ b/vendor/github.com/docker/docker/integration-cli/requirements_test.go @@ -112,22 +112,6 @@ func Apparmor() bool { return err == nil && len(buf) > 1 && buf[0] == 'Y' } -func NotaryHosting() bool { - // for now notary binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // notary-server binary is in PATH. - _, err := exec.LookPath(notaryServerBinary) - return err == nil -} - -func NotaryServerHosting() bool { - // for now notary-server binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // notary-server binary is in PATH. - _, err := exec.LookPath(notaryServerBinary) - return err == nil -} - func Devicemapper() bool { return strings.HasPrefix(testEnv.DaemonInfo.Driver, "devicemapper") } diff --git a/vendor/github.com/docker/docker/integration-cli/trust_server_test.go b/vendor/github.com/docker/docker/integration-cli/trust_server_test.go deleted file mode 100644 index f312083ee..000000000 --- a/vendor/github.com/docker/docker/integration-cli/trust_server_test.go +++ /dev/null @@ -1,334 +0,0 @@ -package main - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/docker/docker/api/types" - cliconfig "github.com/docker/docker/cli/config" - "github.com/docker/docker/integration-cli/checker" - "github.com/docker/docker/integration-cli/cli" - "github.com/docker/docker/integration-cli/fixtures/plugin" - "github.com/docker/go-connections/tlsconfig" - "github.com/go-check/check" - "github.com/gotestyourself/gotestyourself/icmd" -) - -var notaryBinary = "notary" -var notaryServerBinary = "notary-server" - -type 
keyPair struct { - Public string - Private string -} - -type testNotary struct { - cmd *exec.Cmd - dir string - keys []keyPair -} - -const notaryHost = "localhost:4443" -const notaryURL = "https://" + notaryHost - -var SuccessTagging = icmd.Expected{ - Out: "Tagging", -} - -var SuccessSigningAndPushing = icmd.Expected{ - Out: "Signing and pushing trust metadata", -} - -var SuccessDownloaded = icmd.Expected{ - Out: "Status: Downloaded", -} - -var SuccessDownloadedOnStderr = icmd.Expected{ - Err: "Status: Downloaded", -} - -func newTestNotary(c *check.C) (*testNotary, error) { - // generate server config - template := `{ - "server": { - "http_addr": "%s", - "tls_key_file": "%s", - "tls_cert_file": "%s" - }, - "trust_service": { - "type": "local", - "hostname": "", - "port": "", - "key_algorithm": "ed25519" - }, - "logging": { - "level": "debug" - }, - "storage": { - "backend": "memory" - } -}` - tmp, err := ioutil.TempDir("", "notary-test-") - if err != nil { - return nil, err - } - confPath := filepath.Join(tmp, "config.json") - config, err := os.Create(confPath) - if err != nil { - return nil, err - } - defer config.Close() - - workingDir, err := os.Getwd() - if err != nil { - return nil, err - } - if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { - os.RemoveAll(tmp) - return nil, err - } - - // generate client config - clientConfPath := filepath.Join(tmp, "client-config.json") - clientConfig, err := os.Create(clientConfPath) - if err != nil { - return nil, err - } - defer clientConfig.Close() - - template = `{ - "trust_dir" : "%s", - "remote_server": { - "url": "%s", - "skipTLSVerify": true - } -}` - if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.Dir(), "trust"), notaryURL); err != nil { - os.RemoveAll(tmp) - return nil, err - } - - // load key fixture filenames - var keys []keyPair - for i := 1; i < 
5; i++ { - keys = append(keys, keyPair{ - Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), - Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), - }) - } - - // run notary-server - cmd := exec.Command(notaryServerBinary, "-config", confPath) - if err := cmd.Start(); err != nil { - os.RemoveAll(tmp) - if os.IsNotExist(err) { - c.Skip(err.Error()) - } - return nil, err - } - - testNotary := &testNotary{ - cmd: cmd, - dir: tmp, - keys: keys, - } - - // Wait for notary to be ready to serve requests. - for i := 1; i <= 20; i++ { - if err = testNotary.Ping(); err == nil { - break - } - time.Sleep(10 * time.Millisecond * time.Duration(i*i)) - } - - if err != nil { - c.Fatalf("Timeout waiting for test notary to become available: %s", err) - } - - return testNotary, nil -} - -func (t *testNotary) Ping() error { - tlsConfig := tlsconfig.ClientDefault() - tlsConfig.InsecureSkipVerify = true - client := http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - }, - } - resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) - } - return nil -} - -func (t *testNotary) Close() { - t.cmd.Process.Kill() - t.cmd.Process.Wait() - os.RemoveAll(t.dir) -} - -func trustedCmd(cmd *icmd.Cmd) func() { - pwd := "12345678" - cmd.Env = append(cmd.Env, trustEnv(notaryURL, pwd, pwd)...) - return nil -} - -func trustedCmdWithServer(server string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - pwd := "12345678" - cmd.Env = append(cmd.Env, trustEnv(server, pwd, pwd)...) 
- return nil - } -} - -func trustedCmdWithPassphrases(rootPwd, repositoryPwd string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Env = append(cmd.Env, trustEnv(notaryURL, rootPwd, repositoryPwd)...) - return nil - } -} - -func trustEnv(server, rootPwd, repositoryPwd string) []string { - env := append(os.Environ(), []string{ - "DOCKER_CONTENT_TRUST=1", - fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), - fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), - fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), - }...) - return env -} - -func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) - // tag the image and upload it to the private registry - cli.DockerCmd(c, "tag", "busybox", repoName) - cli.Docker(cli.Args("push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - cli.DockerCmd(c, "rmi", repoName) - return repoName -} - -func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) - - client := testEnv.APIClient() - - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - err := plugin.Create(ctx, client, repoName) - cancel() - c.Assert(err, checker.IsNil, check.Commentf("could not create test plugin")) - - // tag the image and upload it to the private registry - // TODO: shouldn't need to use the CLI to do trust - cli.Docker(cli.Args("plugin", "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - - ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) - err = client.PluginRemove(ctx, repoName, types.PluginRemoveOptions{Force: true}) - cancel() - c.Assert(err, checker.IsNil, check.Commentf("failed to cleanup test plugin for trust suite")) - return repoName -} - -func (s *DockerTrustSuite) notaryCmd(c *check.C, args ...string) 
string { - pwd := "12345678" - env := []string{ - fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), - } - result := icmd.RunCmd(icmd.Cmd{ - Command: append([]string{notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json")}, args...), - Env: append(os.Environ(), env...), - }) - result.Assert(c, icmd.Success) - return result.Combined() -} - -func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { - s.notaryCmd(c, "init", repoName) -} - -func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { - pathsArg := "--all-paths" - if len(paths) > 0 { - pathsArg = "--paths=" + strings.Join(paths, ",") - } - - s.notaryCmd(c, "delegation", "add", repoName, role, pubKey, pathsArg) -} - -func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { - s.notaryCmd(c, "publish", repoName) -} - -func (s *DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { - s.notaryCmd(c, "key", "import", privKey, "-g", repoName, "-r", role) -} - -func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { - out := s.notaryCmd(c, "list", repoName, "-r", role) - - // should look something like: - // NAME DIGEST SIZE (BYTES) ROLE - // ------------------------------------------------------------------------------------------------------ - // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets - - targets := make(map[string]string) - - // no target - lines := strings.Split(strings.TrimSpace(out), "\n") - if len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { - return targets - } - - // otherwise, there is at least one target - c.Assert(len(lines), checker.GreaterOrEqualThan, 3) - - for _, line := range 
lines[2:] { - tokens := strings.Fields(line) - c.Assert(tokens, checker.HasLen, 4) - targets[tokens[0]] = tokens[3] - } - - return targets -} - -func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { - // check all the roles - for _, role := range roles { - targets := s.notaryListTargetsInRole(c, repoName, role) - roleName, ok := targets[target] - c.Assert(ok, checker.True) - c.Assert(roleName, checker.Equals, role) - } -} - -func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { - targets := s.notaryListTargetsInRole(c, repoName, "targets") - - roleName, ok := targets[target] - if ok { - for _, role := range roles { - c.Assert(roleName, checker.Not(checker.Equals), role) - } - } -} diff --git a/vendor/github.com/docker/docker/integration/build/build_test.go b/vendor/github.com/docker/docker/integration/build/build_test.go index 124f1107f..d8cb298cd 100644 --- a/vendor/github.com/docker/docker/integration/build/build_test.go +++ b/vendor/github.com/docker/docker/integration/build/build_test.go @@ -5,6 +5,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" "strings" @@ -12,11 +13,13 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" "github.com/docker/docker/integration-cli/cli/build/fakecontext" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/jsonmessage" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" + "github.com/gotestyourself/gotestyourself/skip" ) func TestBuildWithRemoveAndForceRemove(t *testing.T) { @@ -94,21 +97,21 @@ func TestBuildWithRemoveAndForceRemove(t *testing.T) { buff := bytes.NewBuffer(nil) tw := tar.NewWriter(buff) - require.NoError(t, 
tw.WriteHeader(&tar.Header{ + assert.NilError(t, tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), })) _, err := tw.Write(dockerfile) - require.NoError(t, err) - require.NoError(t, tw.Close()) + assert.NilError(t, err) + assert.NilError(t, tw.Close()) resp, err := client.ImageBuild(ctx, buff, types.ImageBuildOptions{Remove: c.rm, ForceRemove: c.forceRm, NoCache: true}) - require.NoError(t, err) + assert.NilError(t, err) defer resp.Body.Close() filter, err := buildContainerIdsFilter(resp.Body) - require.NoError(t, err) + assert.NilError(t, err) remainingContainers, err := client.ContainerList(ctx, types.ContainerListOptions{Filters: filter, All: true}) - require.NoError(t, err) - require.Equal(t, c.numberOfIntermediateContainers, len(remainingContainers), "Expected %v remaining intermediate containers, got %v", c.numberOfIntermediateContainers, len(remainingContainers)) + assert.NilError(t, err) + assert.Equal(t, c.numberOfIntermediateContainers, len(remainingContainers), "Expected %v remaining intermediate containers, got %v", c.numberOfIntermediateContainers, len(remainingContainers)) }) } } @@ -158,16 +161,16 @@ func TestBuildMultiStageParentConfig(t *testing.T) { ForceRemove: true, Tags: []string{"build1"}, }) - require.NoError(t, err) + assert.NilError(t, err) _, err = io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() - require.NoError(t, err) + assert.NilError(t, err) image, _, err := apiclient.ImageInspectWithRaw(ctx, "build1") - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, "/foo/sub2", image.Config.WorkingDir) - assert.Contains(t, image.Config.Env, "WHO=parent") + assert.Check(t, is.Equal("/foo/sub2", image.Config.WorkingDir)) + assert.Check(t, is.Contains(image.Config.Env, "WHO=parent")) } func TestBuildWithEmptyLayers(t *testing.T) { @@ -192,10 +195,10 @@ func TestBuildWithEmptyLayers(t *testing.T) { Remove: true, ForceRemove: true, }) - require.NoError(t, err) + assert.NilError(t, err) _, err = 
io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() - require.NoError(t, err) + assert.NilError(t, err) } // TestBuildMultiStageOnBuild checks that ONBUILD commands are applied to @@ -228,20 +231,20 @@ RUN cat somefile` }) out := bytes.NewBuffer(nil) - require.NoError(t, err) + assert.NilError(t, err) _, err = io.Copy(out, resp.Body) resp.Body.Close() - require.NoError(t, err) + assert.NilError(t, err) - assert.Contains(t, out.String(), "Successfully built") + assert.Check(t, is.Contains(out.String(), "Successfully built")) imageIDs, err := getImageIDsFromBuild(out.Bytes()) - require.NoError(t, err) - assert.Equal(t, 3, len(imageIDs)) + assert.NilError(t, err) + assert.Check(t, is.Equal(3, len(imageIDs))) image, _, err := apiclient.ImageInspectWithRaw(context.Background(), imageIDs[2]) - require.NoError(t, err) - assert.Contains(t, image.Config.Env, "bar=baz") + assert.NilError(t, err) + assert.Check(t, is.Contains(image.Config.Env, "bar=baz")) } // #35403 #36122 @@ -260,7 +263,7 @@ COPY bar /` writeTarRecord(t, w, "../foo", "foocontents0") writeTarRecord(t, w, "/bar", "barcontents0") err := w.Close() - require.NoError(t, err) + assert.NilError(t, err) apiclient := testEnv.APIClient() resp, err := apiclient.ImageBuild(ctx, @@ -271,10 +274,10 @@ COPY bar /` }) out := bytes.NewBuffer(nil) - require.NoError(t, err) + assert.NilError(t, err) _, err = io.Copy(out, resp.Body) resp.Body.Close() - require.NoError(t, err) + assert.NilError(t, err) // repeat with changed data should not cause cache hits @@ -284,7 +287,7 @@ COPY bar /` writeTarRecord(t, w, "../foo", "foocontents1") writeTarRecord(t, w, "/bar", "barcontents1") err = w.Close() - require.NoError(t, err) + assert.NilError(t, err) resp, err = apiclient.ImageBuild(ctx, buf, @@ -294,16 +297,19 @@ COPY bar /` }) out = bytes.NewBuffer(nil) - require.NoError(t, err) + assert.NilError(t, err) _, err = io.Copy(out, resp.Body) resp.Body.Close() - require.NoError(t, err) - require.NotContains(t, out.String(), "Using 
cache") + assert.NilError(t, err) + assert.Assert(t, !strings.Contains(out.String(), "Using cache")) } // docker/for-linux#135 // #35641 func TestBuildMultiStageLayerLeak(t *testing.T) { + fmt.Println(testEnv.DaemonAPIVersion()) + skip.IfCondition(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.38"), + "Don't run on API lower than 1.38 as it has been fixed starting from that version") ctx := context.TODO() defer setupTest(t)() @@ -333,12 +339,12 @@ RUN [ ! -f foo ] }) out := bytes.NewBuffer(nil) - require.NoError(t, err) + assert.NilError(t, err) _, err = io.Copy(out, resp.Body) resp.Body.Close() - require.NoError(t, err) + assert.NilError(t, err) - assert.Contains(t, out.String(), "Successfully built") + assert.Check(t, is.Contains(out.String(), "Successfully built")) } func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) { @@ -348,9 +354,9 @@ func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) { Size: int64(len(contents)), Typeflag: '0', }) - require.NoError(t, err) + assert.NilError(t, err) _, err = w.Write([]byte(contents)) - require.NoError(t, err) + assert.NilError(t, err) } type buildLine struct { diff --git a/vendor/github.com/docker/docker/integration/config/config_test.go b/vendor/github.com/docker/docker/integration/config/config_test.go index 912f55a12..65323e2e5 100644 --- a/vendor/github.com/docker/docker/integration/config/config_test.go +++ b/vendor/github.com/docker/docker/integration/config/config_test.go @@ -14,9 +14,9 @@ import ( "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/internal/testutil" "github.com/docker/docker/pkg/stdcopy" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -27,14 +27,14 @@ func TestConfigList(t *testing.T) { d := swarm.NewSwarm(t, 
testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() // This test case is ported from the original TestConfigsEmptyList configs, err := client.ConfigList(ctx, types.ConfigListOptions{}) - require.NoError(t, err) - assert.Equal(t, len(configs), 0) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(configs), 0)) testName0 := "test0" testName1 := "test1" @@ -57,8 +57,8 @@ func TestConfigList(t *testing.T) { // test by `config ls` entries, err := client.ConfigList(ctx, types.ConfigListOptions{}) - require.NoError(t, err) - assert.Equal(t, names(entries), testNames) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), testNames)) testCases := []struct { filters filters.Args @@ -92,8 +92,8 @@ func TestConfigList(t *testing.T) { entries, err = client.ConfigList(ctx, types.ConfigListOptions{ Filters: tc.filters, }) - require.NoError(t, err) - assert.Equal(t, names(entries), tc.expected) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), tc.expected)) } } @@ -106,8 +106,8 @@ func createConfig(ctx context.Context, t *testing.T, client client.APIClient, na }, Data: data, }) - require.NoError(t, err) - assert.NotEqual(t, config.ID, "") + assert.NilError(t, err) + assert.Check(t, config.ID != "") return config.ID } @@ -118,7 +118,7 @@ func TestConfigsCreateAndDelete(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -128,12 +128,12 @@ func TestConfigsCreateAndDelete(t *testing.T) { configID := createConfig(ctx, t, client, testName, []byte("TESTINGDATA"), nil) insp, _, err := client.ConfigInspectWithRaw(ctx, configID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Name, testName) + assert.NilError(t, err) + assert.Check(t, 
is.Equal(insp.Spec.Name, testName)) // This test case is ported from the original TestConfigsDelete err = client.ConfigRemove(ctx, configID) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = client.ConfigInspectWithRaw(ctx, configID) testutil.ErrorContains(t, err, "No such config") @@ -146,7 +146,7 @@ func TestConfigsUpdate(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -156,35 +156,35 @@ func TestConfigsUpdate(t *testing.T) { configID := createConfig(ctx, t, client, testName, []byte("TESTINGDATA"), nil) insp, _, err := client.ConfigInspectWithRaw(ctx, configID) - require.NoError(t, err) - assert.Equal(t, insp.ID, configID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.ID, configID)) // test UpdateConfig with full ID insp.Spec.Labels = map[string]string{"test": "test1"} err = client.ConfigUpdate(ctx, configID, insp.Version, insp.Spec) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = client.ConfigInspectWithRaw(ctx, configID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Labels["test"], "test1") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test1")) // test UpdateConfig with full name insp.Spec.Labels = map[string]string{"test": "test2"} err = client.ConfigUpdate(ctx, testName, insp.Version, insp.Spec) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = client.ConfigInspectWithRaw(ctx, configID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Labels["test"], "test2") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test2")) // test UpdateConfig with prefix ID insp.Spec.Labels = map[string]string{"test": "test3"} err = client.ConfigUpdate(ctx, configID[:1], insp.Version, insp.Spec) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = 
client.ConfigInspectWithRaw(ctx, configID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Labels["test"], "test3") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test3")) // test UpdateConfig in updating Data which is not supported in daemon // this test will produce an error in func UpdateConfig @@ -207,7 +207,7 @@ func TestTemplatedConfig(t *testing.T) { Data: []byte("this is a secret"), } referencedSecret, err := client.SecretCreate(ctx, referencedSecretSpec) - assert.NoError(t, err) + assert.Check(t, err) referencedConfigSpec := swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ @@ -216,7 +216,7 @@ func TestTemplatedConfig(t *testing.T) { Data: []byte("this is a config"), } referencedConfig, err := client.ConfigCreate(ctx, referencedConfigSpec) - assert.NoError(t, err) + assert.Check(t, err) configSpec := swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ @@ -231,7 +231,7 @@ func TestTemplatedConfig(t *testing.T) { } templatedConfig, err := client.ConfigCreate(ctx, configSpec) - assert.NoError(t, err) + assert.Check(t, err) serviceID := swarm.CreateService(t, d, swarm.ServiceWithConfig( @@ -309,8 +309,8 @@ func TestTemplatedConfig(t *testing.T) { func assertAttachedStream(t *testing.T, attach types.HijackedResponse, expect string) { buf := bytes.NewBuffer(nil) _, err := stdcopy.StdCopy(buf, buf, attach.Reader) - require.NoError(t, err) - assert.Contains(t, buf.String(), expect) + assert.NilError(t, err) + assert.Check(t, is.Contains(buf.String(), expect)) } func waitAndAssert(t *testing.T, timeout time.Duration, f func(*testing.T) bool) { @@ -336,7 +336,7 @@ func TestConfigInspect(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -344,11 +344,11 @@ func TestConfigInspect(t *testing.T) { configID := createConfig(ctx, t, client, testName, 
[]byte("TESTINGDATA"), nil) insp, body, err := client.ConfigInspectWithRaw(ctx, configID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Name, testName) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Name, testName)) var config swarmtypes.Config err = json.Unmarshal(body, &config) - require.NoError(t, err) - assert.Equal(t, config, insp) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(config, insp)) } diff --git a/vendor/github.com/docker/docker/integration/container/copy_test.go b/vendor/github.com/docker/docker/integration/container/copy_test.go index 43dc31f2f..766c0a176 100644 --- a/vendor/github.com/docker/docker/integration/container/copy_test.go +++ b/vendor/github.com/docker/docker/integration/container/copy_test.go @@ -9,8 +9,9 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/require" ) func TestCopyFromContainerPathDoesNotExist(t *testing.T) { @@ -21,7 +22,7 @@ func TestCopyFromContainerPathDoesNotExist(t *testing.T) { cid := container.Create(t, ctx, apiclient) _, _, err := apiclient.CopyFromContainer(ctx, cid, "/dne") - require.True(t, client.IsErrNotFound(err)) + assert.Assert(t, client.IsErrNotFound(err)) expected := fmt.Sprintf("No such container:path: %s:%s", cid, "/dne") testutil.ErrorContains(t, err, expected) } @@ -35,7 +36,7 @@ func TestCopyFromContainerPathIsNotDir(t *testing.T) { cid := container.Create(t, ctx, apiclient) _, _, err := apiclient.CopyFromContainer(ctx, cid, "/etc/passwd/") - require.Contains(t, err.Error(), "not a directory") + assert.Assert(t, is.Contains(err.Error(), "not a directory")) } func TestCopyToContainerPathDoesNotExist(t *testing.T) { @@ -47,7 +48,7 @@ func TestCopyToContainerPathDoesNotExist(t 
*testing.T) { cid := container.Create(t, ctx, apiclient) err := apiclient.CopyToContainer(ctx, cid, "/dne", nil, types.CopyToContainerOptions{}) - require.True(t, client.IsErrNotFound(err)) + assert.Assert(t, client.IsErrNotFound(err)) expected := fmt.Sprintf("No such container:path: %s:%s", cid, "/dne") testutil.ErrorContains(t, err, expected) } @@ -61,5 +62,5 @@ func TestCopyToContainerPathIsNotDir(t *testing.T) { cid := container.Create(t, ctx, apiclient) err := apiclient.CopyToContainer(ctx, cid, "/etc/passwd/", nil, types.CopyToContainerOptions{}) - require.Contains(t, err.Error(), "not a directory") + assert.Assert(t, is.Contains(err.Error(), "not a directory")) } diff --git a/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go b/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go index 5077770f5..c2920502e 100644 --- a/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go +++ b/vendor/github.com/docker/docker/integration/container/daemon_linux_test.go @@ -11,8 +11,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration/internal/container" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" ) @@ -35,7 +35,7 @@ func TestContainerStartOnDaemonRestart(t *testing.T) { defer d.Stop(t) client, err := d.NewClient() - assert.NoError(t, err, "error creating client") + assert.Check(t, err, "error creating client") ctx := context.Background() @@ -43,36 +43,36 @@ func TestContainerStartOnDaemonRestart(t *testing.T) { defer client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{Force: true}) err = client.ContainerStart(ctx, cID, types.ContainerStartOptions{}) - assert.NoError(t, err, "error starting test container") + assert.Check(t, err, "error starting test container") inspect, err := 
client.ContainerInspect(ctx, cID) - assert.NoError(t, err, "error getting inspect data") + assert.Check(t, err, "error getting inspect data") ppid := getContainerdShimPid(t, inspect) err = d.Kill() - assert.NoError(t, err, "failed to kill test daemon") + assert.Check(t, err, "failed to kill test daemon") err = unix.Kill(inspect.State.Pid, unix.SIGKILL) - assert.NoError(t, err, "failed to kill container process") + assert.Check(t, err, "failed to kill container process") err = unix.Kill(ppid, unix.SIGKILL) - assert.NoError(t, err, "failed to kill containerd-shim") + assert.Check(t, err, "failed to kill containerd-shim") d.Start(t, "--iptables=false") err = client.ContainerStart(ctx, cID, types.ContainerStartOptions{}) - assert.NoError(t, err, "failed to start test container") + assert.Check(t, err, "failed to start test container") } func getContainerdShimPid(t *testing.T, c types.ContainerJSON) int { statB, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", c.State.Pid)) - assert.NoError(t, err, "error looking up containerd-shim pid") + assert.Check(t, err, "error looking up containerd-shim pid") // ppid is the 4th entry in `/proc/pid/stat` ppid, err := strconv.Atoi(strings.Fields(string(statB))[3]) - assert.NoError(t, err, "error converting ppid field to int") + assert.Check(t, err, "error converting ppid field to int") - assert.NotEqual(t, ppid, 1, "got unexpected ppid") + assert.Check(t, ppid != 1, "got unexpected ppid") return ppid } diff --git a/vendor/github.com/docker/docker/integration/container/diff_test.go b/vendor/github.com/docker/docker/integration/container/diff_test.go index de5ff4e21..56fb983b1 100644 --- a/vendor/github.com/docker/docker/integration/container/diff_test.go +++ b/vendor/github.com/docker/docker/integration/container/diff_test.go @@ -9,9 +9,8 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/archive" + 
"github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestDiff(t *testing.T) { @@ -38,6 +37,6 @@ func TestDiff(t *testing.T) { } items, err := client.ContainerDiff(ctx, cID) - require.NoError(t, err) - assert.Equal(t, expected, items) + assert.NilError(t, err) + assert.DeepEqual(t, expected, items) } diff --git a/vendor/github.com/docker/docker/integration/container/exec_test.go b/vendor/github.com/docker/docker/integration/container/exec_test.go index 06835678f..1b710432d 100644 --- a/vendor/github.com/docker/docker/integration/container/exec_test.go +++ b/vendor/github.com/docker/docker/integration/container/exec_test.go @@ -9,7 +9,8 @@ import ( "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestExec(t *testing.T) { @@ -27,7 +28,7 @@ func TestExec(t *testing.T) { Cmd: strslice.StrSlice([]string{"sh", "-c", "env"}), }, ) - require.NoError(t, err) + assert.NilError(t, err) resp, err := client.ContainerExecAttach(ctx, id.ID, types.ExecStartCheck{ @@ -35,12 +36,12 @@ func TestExec(t *testing.T) { Tty: false, }, ) - require.NoError(t, err) + assert.NilError(t, err) defer resp.Close() r, err := ioutil.ReadAll(resp.Reader) - require.NoError(t, err) + assert.NilError(t, err) out := string(r) - require.NoError(t, err) - require.Contains(t, out, "PWD=/tmp", "exec command not running in expected /tmp working directory") - require.Contains(t, out, "FOO=BAR", "exec command not running with expected environment variable FOO") + assert.NilError(t, err) + assert.Assert(t, is.Contains(out, "PWD=/tmp"), "exec command not running in expected /tmp working directory") + assert.Assert(t, 
is.Contains(out, "FOO=BAR"), "exec command not running with expected environment variable FOO") } diff --git a/vendor/github.com/docker/docker/integration/container/export_test.go b/vendor/github.com/docker/docker/integration/container/export_test.go index 657b1fce4..f7f0295ce 100644 --- a/vendor/github.com/docker/docker/integration/container/export_test.go +++ b/vendor/github.com/docker/docker/integration/container/export_test.go @@ -7,14 +7,16 @@ import ( "time" "github.com/docker/docker/api/types" + containerTypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/jsonmessage" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // export an image and try to import it into a new one @@ -30,12 +32,12 @@ func TestExportContainerAndImportImage(t *testing.T) { reference := "repo/testexp:v1" exportResp, err := client.ContainerExport(ctx, cID) - require.NoError(t, err) + assert.NilError(t, err) importResp, err := client.ImageImport(ctx, types.ImageImportSource{ Source: exportResp, SourceName: "-", }, reference, types.ImageImportOptions{}) - require.NoError(t, err) + assert.NilError(t, err) // If the import is successfully, then the message output should contain // the image ID and match with the output from `docker images`. 
@@ -43,11 +45,40 @@ func TestExportContainerAndImportImage(t *testing.T) { dec := json.NewDecoder(importResp) var jm jsonmessage.JSONMessage err = dec.Decode(&jm) - require.NoError(t, err) + assert.NilError(t, err) images, err := client.ImageList(ctx, types.ImageListOptions{ Filters: filters.NewArgs(filters.Arg("reference", reference)), }) - require.NoError(t, err) - assert.Equal(t, jm.Status, images[0].ID) + assert.NilError(t, err) + assert.Check(t, is.Equal(jm.Status, images[0].ID)) +} + +// TestExportContainerAfterDaemonRestart checks that a container +// created before start of the currently running dockerd +// can be exported (as reported in #36561). To satisfy this +// condition, daemon restart is needed after container creation. +func TestExportContainerAfterDaemonRestart(t *testing.T) { + skip.If(t, testEnv.DaemonInfo.OSType != "linux") + skip.If(t, testEnv.IsRemoteDaemon()) + + d := daemon.New(t, "", "dockerd", daemon.Config{}) + client, err := d.NewClient() + assert.NilError(t, err) + + d.StartWithBusybox(t) + defer d.Stop(t) + + ctx := context.Background() + cfg := containerTypes.Config{ + Image: "busybox", + Cmd: []string{"top"}, + } + ctr, err := client.ContainerCreate(ctx, &cfg, nil, nil, "") + assert.NilError(t, err) + + d.Restart(t) + + _, err = client.ContainerExport(ctx, ctr.ID) + assert.NilError(t, err) } diff --git a/vendor/github.com/docker/docker/integration/container/inspect_test.go b/vendor/github.com/docker/docker/integration/container/inspect_test.go index c7ea23b51..0433522e6 100644 --- a/vendor/github.com/docker/docker/integration/container/inspect_test.go +++ b/vendor/github.com/docker/docker/integration/container/inspect_test.go @@ -9,10 +9,10 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" 
"github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestInspectCpusetInConfigPre120(t *testing.T) { @@ -22,7 +22,7 @@ func TestInspectCpusetInConfigPre120(t *testing.T) { client := request.NewAPIClient(t, client.WithVersion("1.19")) ctx := context.Background() - name := "cpusetinconfig-pre120" + name := "cpusetinconfig-pre120-" + t.Name() // Create container with up to-date-API container.Run(t, ctx, request.NewAPIClient(t), container.WithName(name), container.WithCmd("true"), @@ -33,16 +33,16 @@ func TestInspectCpusetInConfigPre120(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, name, "exited"), poll.WithDelay(100*time.Millisecond)) _, body, err := client.ContainerInspectWithRaw(ctx, name, false) - require.NoError(t, err) + assert.NilError(t, err) var inspectJSON map[string]interface{} err = json.Unmarshal(body, &inspectJSON) - require.NoError(t, err, "unable to unmarshal body for version 1.19: %s", err) + assert.NilError(t, err, "unable to unmarshal body for version 1.19: %s", err) config, ok := inspectJSON["Config"] - assert.Equal(t, true, ok, "Unable to find 'Config'") + assert.Check(t, is.Equal(true, ok), "Unable to find 'Config'") cfg := config.(map[string]interface{}) _, ok = cfg["Cpuset"] - assert.Equal(t, true, ok, "API version 1.19 expected to include Cpuset in 'Config'") + assert.Check(t, is.Equal(true, ok), "API version 1.19 expected to include Cpuset in 'Config'") } diff --git a/vendor/github.com/docker/docker/integration/container/kill_test.go b/vendor/github.com/docker/docker/integration/container/kill_test.go index 5fae91267..4df28966f 100644 --- a/vendor/github.com/docker/docker/integration/container/kill_test.go +++ b/vendor/github.com/docker/docker/integration/container/kill_test.go @@ -9,10 +9,10 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" 
"github.com/docker/docker/integration/internal/request" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestKillContainerInvalidSignal(t *testing.T) { @@ -22,11 +22,11 @@ func TestKillContainerInvalidSignal(t *testing.T) { id := container.Run(t, ctx, client) err := client.ContainerKill(ctx, id, "0") - require.EqualError(t, err, "Error response from daemon: Invalid signal: 0") + assert.Error(t, err, "Error response from daemon: Invalid signal: 0") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err = client.ContainerKill(ctx, id, "SIG42") - require.EqualError(t, err, "Error response from daemon: Invalid signal: SIG42") + assert.Error(t, err, "Error response from daemon: Invalid signal: SIG42") poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) } @@ -62,7 +62,7 @@ func TestKillContainer(t *testing.T) { ctx := context.Background() id := container.Run(t, ctx, client) err := client.ContainerKill(ctx, id, tc.signal) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) @@ -102,7 +102,7 @@ func TestKillWithStopSignalAndRestartPolicies(t *testing.T) { } }) err := client.ContainerKill(ctx, id, "TERM") - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, tc.status), poll.WithDelay(100*time.Millisecond)) }) @@ -116,8 +116,8 @@ func TestKillStoppedContainer(t *testing.T) { client := request.NewAPIClient(t) id := container.Create(t, ctx, client) err := client.ContainerKill(ctx, id, "SIGKILL") - require.Error(t, err) - require.Contains(t, err.Error(), "is not running") + 
assert.Assert(t, is.ErrorContains(err, "")) + assert.Assert(t, is.Contains(err.Error(), "is not running")) } func TestKillStoppedContainerAPIPre120(t *testing.T) { @@ -127,7 +127,7 @@ func TestKillStoppedContainerAPIPre120(t *testing.T) { client := request.NewAPIClient(t, client.WithVersion("1.19")) id := container.Create(t, ctx, client) err := client.ContainerKill(ctx, id, "SIGKILL") - require.NoError(t, err) + assert.NilError(t, err) } func TestKillDifferentUserContainer(t *testing.T) { @@ -144,7 +144,7 @@ func TestKillDifferentUserContainer(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, id, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerKill(ctx, id, "SIGKILL") - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, id, "exited"), poll.WithDelay(100*time.Millisecond)) } @@ -162,8 +162,8 @@ func TestInspectOomKilledTrue(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, true, inspect.State.OOMKilled) + assert.NilError(t, err) + assert.Check(t, is.Equal(true, inspect.State.OOMKilled)) } func TestInspectOomKilledFalse(t *testing.T) { @@ -178,6 +178,6 @@ func TestInspectOomKilledFalse(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, false, inspect.State.OOMKilled) + assert.NilError(t, err) + assert.Check(t, is.Equal(false, inspect.State.OOMKilled)) } diff --git a/vendor/github.com/docker/docker/integration/container/links_linux_test.go b/vendor/github.com/docker/docker/integration/container/links_linux_test.go index d230898ed..ed5966bc7 100644 --- a/vendor/github.com/docker/docker/integration/container/links_linux_test.go +++ 
b/vendor/github.com/docker/docker/integration/container/links_linux_test.go @@ -1,22 +1,18 @@ package container // import "github.com/docker/docker/integration/container" import ( - "bytes" "context" "io/ioutil" "os" "testing" - "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" - "github.com/docker/docker/pkg/stdcopy" - "github.com/gotestyourself/gotestyourself/poll" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestLinksEtcHostsContentMatch(t *testing.T) { @@ -29,21 +25,13 @@ func TestLinksEtcHostsContentMatch(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - cID := container.Run(t, ctx, client, container.WithCmd("cat", "/etc/hosts"), container.WithNetworkMode("host")) + cID := container.Run(t, ctx, client, container.WithNetworkMode("host")) + res, err := container.Exec(ctx, client, cID, []string{"cat", "/etc/hosts"}) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) - poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) - - body, err := client.ContainerLogs(ctx, cID, types.ContainerLogsOptions{ - ShowStdout: true, - }) - require.NoError(t, err) - defer body.Close() - - var b bytes.Buffer - _, err = stdcopy.StdCopy(&b, ioutil.Discard, body) - require.NoError(t, err) - - assert.Equal(t, string(hosts), b.String()) + assert.Check(t, is.Equal(string(hosts), res.Stdout())) } func TestLinksContainerNames(t *testing.T) { @@ -61,7 +49,7 @@ func TestLinksContainerNames(t *testing.T) { containers, err := client.ContainerList(ctx, types.ContainerListOptions{ Filters: f, }) - require.NoError(t, err) - 
assert.Equal(t, 1, len(containers)) - assert.Equal(t, []string{"/first", "/second/first"}, containers[0].Names) + assert.NilError(t, err) + assert.Check(t, is.Equal(1, len(containers))) + assert.Check(t, is.DeepEqual([]string{"/first", "/second/first"}, containers[0].Names)) } diff --git a/vendor/github.com/docker/docker/integration/container/logs_test.go b/vendor/github.com/docker/docker/integration/container/logs_test.go index bae431527..9f536742b 100644 --- a/vendor/github.com/docker/docker/integration/container/logs_test.go +++ b/vendor/github.com/docker/docker/integration/container/logs_test.go @@ -9,7 +9,7 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/stdcopy" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" ) // Regression test for #35370 @@ -25,8 +25,8 @@ func TestLogsFollowTailEmpty(t *testing.T) { if logs != nil { defer logs.Close() } - assert.NoError(t, err) + assert.Check(t, err) _, err = stdcopy.StdCopy(ioutil.Discard, ioutil.Discard, logs) - assert.NoError(t, err) + assert.Check(t, err) } diff --git a/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go b/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go index 71bdccc71..e15786f8a 100644 --- a/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go +++ b/vendor/github.com/docker/docker/integration/container/mounts_linux_test.go @@ -16,10 +16,10 @@ import ( "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/system" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func 
TestContainerShmNoLeak(t *testing.T) { @@ -30,7 +30,7 @@ func TestContainerShmNoLeak(t *testing.T) { if err != nil { t.Fatal(err) } - d.StartWithBusybox(t) + d.StartWithBusybox(t, "--iptables=false") defer d.Stop(t) ctx := context.Background() @@ -130,14 +130,14 @@ func TestContainerNetworkMountsNoChown(t *testing.T) { } cli, err := client.NewEnvClient() - require.NoError(t, err) + assert.NilError(t, err) defer cli.Close() ctrCreate, err := cli.ContainerCreate(ctx, &config, &hostConfig, &network.NetworkingConfig{}, "") - require.NoError(t, err) + assert.NilError(t, err) // container will exit immediately because of no tty, but we only need the start sequence to test the condition err = cli.ContainerStart(ctx, ctrCreate.ID, types.ContainerStartOptions{}) - require.NoError(t, err) + assert.NilError(t, err) // Check that host-located bind mount network file did not change ownership when the container was started // Note: If the user specifies a mountpath from the host, we should not be @@ -150,8 +150,8 @@ func TestContainerNetworkMountsNoChown(t *testing.T) { // same line--we don't chown host file content. // See GitHub PR 34224 for details. 
statT, err := system.Stat(tmpNWFileMount) - require.NoError(t, err) - assert.Equal(t, uint32(0), statT.UID(), "bind mounted network file should not change ownership from root") + assert.NilError(t, err) + assert.Check(t, is.Equal(uint32(0), statT.UID()), "bind mounted network file should not change ownership from root") } func TestMountDaemonRoot(t *testing.T) { diff --git a/vendor/github.com/docker/docker/integration/container/nat_test.go b/vendor/github.com/docker/docker/integration/container/nat_test.go index 293ba9bba..5e7402770 100644 --- a/vendor/github.com/docker/docker/integration/container/nat_test.go +++ b/vendor/github.com/docker/docker/integration/container/nat_test.go @@ -15,10 +15,10 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/go-connections/nat" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestNetworkNat(t *testing.T) { @@ -31,12 +31,12 @@ func TestNetworkNat(t *testing.T) { endpoint := getExternalAddress(t) conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) - require.NoError(t, err) + assert.NilError(t, err) defer conn.Close() data, err := ioutil.ReadAll(conn) - require.NoError(t, err) - assert.Equal(t, msg, strings.TrimSpace(string(data))) + assert.NilError(t, err) + assert.Check(t, is.Equal(msg, strings.TrimSpace(string(data)))) } func TestNetworkLocalhostTCPNat(t *testing.T) { @@ -48,47 +48,49 @@ func TestNetworkLocalhostTCPNat(t *testing.T) { startServerContainer(t, msg, 8081) conn, err := net.Dial("tcp", "localhost:8081") - require.NoError(t, err) + assert.NilError(t, err) defer conn.Close() data, err := ioutil.ReadAll(conn) - require.NoError(t, err) - assert.Equal(t, msg, 
strings.TrimSpace(string(data))) + assert.NilError(t, err) + assert.Check(t, is.Equal(msg, strings.TrimSpace(string(data)))) } func TestNetworkLoopbackNat(t *testing.T) { skip.If(t, testEnv.IsRemoteDaemon()) + defer setupTest(t)() + msg := "it works" - startServerContainer(t, msg, 8080) + serverContainerID := startServerContainer(t, msg, 8080) endpoint := getExternalAddress(t) client := request.NewAPIClient(t) ctx := context.Background() - cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:server")) + cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID)) poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) body, err := client.ContainerLogs(ctx, cID, types.ContainerLogsOptions{ ShowStdout: true, }) - require.NoError(t, err) + assert.NilError(t, err) defer body.Close() var b bytes.Buffer _, err = io.Copy(&b, body) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, msg, strings.TrimSpace(b.String())) + assert.Check(t, is.Equal(msg, strings.TrimSpace(b.String()))) } func startServerContainer(t *testing.T, msg string, port int) string { client := request.NewAPIClient(t) ctx := context.Background() - cID := container.Run(t, ctx, client, container.WithName("server"), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) { + cID := container.Run(t, ctx, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) { c.HostConfig.PortBindings = 
nat.PortMap{ nat.Port(fmt.Sprintf("%d/tcp", port)): []nat.PortBinding{ { @@ -108,11 +110,11 @@ func getExternalAddress(t *testing.T) net.IP { skip.If(t, err != nil, "Test not running with `make test-integration`. Interface eth0 not found: %s", err) ifaceAddrs, err := iface.Addrs() - require.NoError(t, err) - assert.NotEqual(t, 0, len(ifaceAddrs)) + assert.NilError(t, err) + assert.Check(t, 0 != len(ifaceAddrs)) ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) - require.NoError(t, err) + assert.NilError(t, err) return ifaceIP } diff --git a/vendor/github.com/docker/docker/integration/container/pause_test.go b/vendor/github.com/docker/docker/integration/container/pause_test.go index bf9f9c3d8..dd8356f85 100644 --- a/vendor/github.com/docker/docker/integration/container/pause_test.go +++ b/vendor/github.com/docker/docker/integration/container/pause_test.go @@ -12,10 +12,10 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestPause(t *testing.T) { @@ -31,14 +31,14 @@ func TestPause(t *testing.T) { since := request.DaemonUnixTime(ctx, t, client, testEnv) err := client.ContainerPause(ctx, cID) - require.NoError(t, err) + assert.NilError(t, err) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, true, inspect.State.Paused) + assert.NilError(t, err) + assert.Check(t, is.Equal(true, inspect.State.Paused)) err = client.ContainerUnpause(ctx, cID) - require.NoError(t, err) + assert.NilError(t, err) until := request.DaemonUnixTime(ctx, t, client, testEnv) @@ -47,7 +47,7 @@ func TestPause(t *testing.T) { Until: until, 
Filters: filters.NewArgs(filters.Arg("container", cID)), }) - assert.Equal(t, []string{"pause", "unpause"}, getEventActions(t, messages, errs)) + assert.Check(t, is.DeepEqual([]string{"pause", "unpause"}, getEventActions(t, messages, errs))) } func TestPauseFailsOnWindowsServerContainers(t *testing.T) { @@ -75,10 +75,10 @@ func TestPauseStopPausedContainer(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) err := client.ContainerPause(ctx, cID) - require.NoError(t, err) + assert.NilError(t, err) err = client.ContainerStop(ctx, cID, nil) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond)) } @@ -88,7 +88,7 @@ func getEventActions(t *testing.T, messages <-chan events.Message, errs <-chan e for { select { case err := <-errs: - assert.True(t, err == nil || err == io.EOF) + assert.Check(t, err == nil || err == io.EOF) return actions case e := <-messages: actions = append(actions, e.Status) diff --git a/vendor/github.com/docker/docker/integration/container/ps_test.go b/vendor/github.com/docker/docker/integration/container/ps_test.go index 358276b36..4dacef165 100644 --- a/vendor/github.com/docker/docker/integration/container/ps_test.go +++ b/vendor/github.com/docker/docker/integration/container/ps_test.go @@ -8,8 +8,8 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestPsFilter(t *testing.T) { @@ -17,9 +17,10 @@ func TestPsFilter(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - prev := container.Create(t, ctx, client, container.WithName("prev")) - container.Create(t, 
ctx, client, container.WithName("top")) - next := container.Create(t, ctx, client, container.WithName("next")) + prev := container.Create(t, ctx, client, container.WithName("prev-"+t.Name())) + topContainerName := "top-" + t.Name() + container.Create(t, ctx, client, container.WithName(topContainerName)) + next := container.Create(t, ctx, client, container.WithName("next-"+t.Name())) containerIDs := func(containers []types.Container) []string { entries := []string{} @@ -30,20 +31,20 @@ func TestPsFilter(t *testing.T) { } f1 := filters.NewArgs() - f1.Add("since", "top") + f1.Add("since", topContainerName) q1, err := client.ContainerList(ctx, types.ContainerListOptions{ All: true, Filters: f1, }) - require.NoError(t, err) - assert.Contains(t, containerIDs(q1), next) + assert.NilError(t, err) + assert.Check(t, is.Contains(containerIDs(q1), next)) f2 := filters.NewArgs() - f2.Add("before", "top") + f2.Add("before", topContainerName) q2, err := client.ContainerList(ctx, types.ContainerListOptions{ All: true, Filters: f2, }) - require.NoError(t, err) - assert.Contains(t, containerIDs(q2), prev) + assert.NilError(t, err) + assert.Check(t, is.Contains(containerIDs(q2), prev)) } diff --git a/vendor/github.com/docker/docker/integration/container/remove_test.go b/vendor/github.com/docker/docker/integration/container/remove_test.go index 98aacdd20..bbc521b05 100644 --- a/vendor/github.com/docker/docker/integration/container/remove_test.go +++ b/vendor/github.com/docker/docker/integration/container/remove_test.go @@ -11,11 +11,11 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/fs" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { @@ -42,12 +42,12 @@ func TestRemoveContainerWithRemovedVolume(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) err := os.RemoveAll(tempDir.Path()) - require.NoError(t, err) + assert.NilError(t, err) err = client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ RemoveVolumes: true, }) - require.NoError(t, err) + assert.NilError(t, err) _, _, err = client.ContainerInspectWithRaw(ctx, cID, true) testutil.ErrorContains(t, err, "No such container") @@ -65,18 +65,18 @@ func TestRemoveContainerWithVolume(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) insp, _, err := client.ContainerInspectWithRaw(ctx, cID, true) - require.NoError(t, err) - assert.Equal(t, 1, len(insp.Mounts)) + assert.NilError(t, err) + assert.Check(t, is.Equal(1, len(insp.Mounts))) volName := insp.Mounts[0].Name err = client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ RemoveVolumes: true, }) - require.NoError(t, err) + assert.NilError(t, err) volumes, err := client.VolumeList(ctx, filters.NewArgs(filters.Arg("name", volName))) - require.NoError(t, err) - assert.Equal(t, 0, len(volumes.Volumes)) + assert.NilError(t, err) + assert.Check(t, is.Equal(0, len(volumes.Volumes))) } func TestRemoveContainerRunning(t *testing.T) { @@ -100,7 +100,7 @@ func TestRemoveContainerForceRemoveRunning(t *testing.T) { err := client.ContainerRemove(ctx, cID, types.ContainerRemoveOptions{ Force: true, }) - require.NoError(t, err) + assert.NilError(t, err) } func TestRemoveInvalidContainer(t *testing.T) { diff --git a/vendor/github.com/docker/docker/integration/container/rename_test.go b/vendor/github.com/docker/docker/integration/container/rename_test.go index 3567aee1f..cdeec5d27 100644 --- 
a/vendor/github.com/docker/docker/integration/container/rename_test.go +++ b/vendor/github.com/docker/docker/integration/container/rename_test.go @@ -6,15 +6,16 @@ import ( "time" "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" "github.com/docker/docker/pkg/stringid" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // This test simulates the scenario mentioned in #31392: @@ -26,22 +27,24 @@ func TestRenameLinkedContainer(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - aID := container.Run(t, ctx, client, container.WithName("a0")) - bID := container.Run(t, ctx, client, container.WithName("b0"), container.WithLinks("a0")) + aName := "a0" + t.Name() + bName := "b0" + t.Name() + aID := container.Run(t, ctx, client, container.WithName(aName)) + bID := container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName)) - err := client.ContainerRename(ctx, aID, "a1") - require.NoError(t, err) + err := client.ContainerRename(ctx, aID, "a1"+t.Name()) + assert.NilError(t, err) - container.Run(t, ctx, client, container.WithName("a0")) + container.Run(t, ctx, client, container.WithName(aName)) err = client.ContainerRemove(ctx, bID, types.ContainerRemoveOptions{Force: true}) - require.NoError(t, err) + assert.NilError(t, err) - bID = container.Run(t, ctx, client, container.WithName("b0"), container.WithLinks("a0")) + bID = container.Run(t, ctx, client, container.WithName(bName), container.WithLinks(aName)) inspect, err := 
client.ContainerInspect(ctx, bID) - require.NoError(t, err) - assert.Equal(t, []string{"/a0:/b0/a0"}, inspect.HostConfig.Links) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]string{"/" + aName + ":/" + bName + "/" + aName}, inspect.HostConfig.Links)) } func TestRenameStoppedContainer(t *testing.T) { @@ -49,21 +52,21 @@ func TestRenameStoppedContainer(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - oldName := "first_name" + oldName := "first_name" + t.Name() cID := container.Run(t, ctx, client, container.WithName(oldName), container.WithCmd("sh")) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, "/"+oldName, inspect.Name) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+oldName, inspect.Name)) newName := "new_name" + stringid.GenerateNonCryptoID() err = client.ContainerRename(ctx, oldName, newName) - require.NoError(t, err) + assert.NilError(t, err) inspect, err = client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, "/"+newName, inspect.Name) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+newName, inspect.Name)) } func TestRenameRunningContainerAndReuse(t *testing.T) { @@ -71,17 +74,17 @@ func TestRenameRunningContainerAndReuse(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - oldName := "first_name" + oldName := "first_name" + t.Name() cID := container.Run(t, ctx, client, container.WithName(oldName)) poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) newName := "new_name" + stringid.GenerateNonCryptoID() err := client.ContainerRename(ctx, oldName, newName) - require.NoError(t, err) + assert.NilError(t, err) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, "/"+newName, inspect.Name) + assert.NilError(t, err) + 
assert.Check(t, is.Equal("/"+newName, inspect.Name)) _, err = client.ContainerInspect(ctx, oldName) testutil.ErrorContains(t, err, "No such container: "+oldName) @@ -90,8 +93,8 @@ func TestRenameRunningContainerAndReuse(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) inspect, err = client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, "/"+oldName, inspect.Name) + assert.NilError(t, err) + assert.Check(t, is.Equal("/"+oldName, inspect.Name)) } func TestRenameInvalidName(t *testing.T) { @@ -99,7 +102,7 @@ func TestRenameInvalidName(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - oldName := "first_name" + oldName := "first_name" + t.Name() cID := container.Run(t, ctx, client, container.WithName(oldName)) poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) @@ -107,8 +110,8 @@ func TestRenameInvalidName(t *testing.T) { testutil.ErrorContains(t, err, "Invalid container name") inspect, err := client.ContainerInspect(ctx, oldName) - require.NoError(t, err) - assert.Equal(t, cID, inspect.ID) + assert.NilError(t, err) + assert.Check(t, is.Equal(cID, inspect.ID)) } // Test case for GitHub issue 22466 @@ -123,22 +126,26 @@ func TestRenameAnonymousContainer(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - _, err := client.NetworkCreate(ctx, "network1", types.NetworkCreate{}) - require.NoError(t, err) + networkName := "network1" + t.Name() + _, err := client.NetworkCreate(ctx, networkName, types.NetworkCreate{}) + + assert.NilError(t, err) cID := container.Run(t, ctx, client, func(c *container.TestContainerConfig) { c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - "network1": {}, + networkName: {}, } - c.HostConfig.NetworkMode = "network1" + c.HostConfig.NetworkMode = containertypes.NetworkMode(networkName) }) - err = client.ContainerRename(ctx, 
cID, "container1") - require.NoError(t, err) + + container1Name := "container1" + t.Name() + err = client.ContainerRename(ctx, cID, container1Name) + assert.NilError(t, err) // Stop/Start the container to get registered // FIXME(vdemeester) this is a really weird behavior as it fails otherwise - err = client.ContainerStop(ctx, "container1", nil) - require.NoError(t, err) - err = client.ContainerStart(ctx, "container1", types.ContainerStartOptions{}) - require.NoError(t, err) + err = client.ContainerStop(ctx, container1Name, nil) + assert.NilError(t, err) + err = client.ContainerStart(ctx, container1Name, types.ContainerStartOptions{}) + assert.NilError(t, err) poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) @@ -148,15 +155,15 @@ func TestRenameAnonymousContainer(t *testing.T) { } cID = container.Run(t, ctx, client, func(c *container.TestContainerConfig) { c.NetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - "network1": {}, + networkName: {}, } - c.HostConfig.NetworkMode = "network1" - }, container.WithCmd("ping", count, "1", "container1")) + c.HostConfig.NetworkMode = containertypes.NetworkMode(networkName) + }, container.WithCmd("ping", count, "1", container1Name)) poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, 0, inspect.State.ExitCode, "container %s exited with the wrong exitcode: %+v", cID, inspect) + assert.NilError(t, err) + assert.Check(t, is.Equal(0, inspect.State.ExitCode), "container %s exited with the wrong exitcode: %+v", cID, inspect) } // TODO: should be a unit test @@ -165,11 +172,13 @@ func TestRenameContainerWithSameName(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - cID := container.Run(t, ctx, client, container.WithName("old")) + oldName := "old" + t.Name() + cID := container.Run(t, ctx, 
client, container.WithName(oldName)) + poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) - err := client.ContainerRename(ctx, "old", "old") + err := client.ContainerRename(ctx, oldName, oldName) testutil.ErrorContains(t, err, "Renaming a container with the same name") - err = client.ContainerRename(ctx, cID, "old") + err = client.ContainerRename(ctx, cID, oldName) testutil.ErrorContains(t, err, "Renaming a container with the same name") } @@ -185,16 +194,19 @@ func TestRenameContainerWithLinkedContainer(t *testing.T) { ctx := context.Background() client := request.NewAPIClient(t) - db1ID := container.Run(t, ctx, client, container.WithName("db1")) + db1Name := "db1" + t.Name() + db1ID := container.Run(t, ctx, client, container.WithName(db1Name)) poll.WaitOn(t, container.IsInState(ctx, client, db1ID, "running"), poll.WithDelay(100*time.Millisecond)) - app1ID := container.Run(t, ctx, client, container.WithName("app1"), container.WithLinks("db1:/mysql")) + app1Name := "app1" + t.Name() + app2Name := "app2" + t.Name() + app1ID := container.Run(t, ctx, client, container.WithName(app1Name), container.WithLinks(db1Name+":/mysql")) poll.WaitOn(t, container.IsInState(ctx, client, app1ID, "running"), poll.WithDelay(100*time.Millisecond)) - err := client.ContainerRename(ctx, "app1", "app2") - require.NoError(t, err) + err := client.ContainerRename(ctx, app1Name, app2Name) + assert.NilError(t, err) - inspect, err := client.ContainerInspect(ctx, "app2/mysql") - require.NoError(t, err) - assert.Equal(t, db1ID, inspect.ID) + inspect, err := client.ContainerInspect(ctx, app2Name+"/mysql") + assert.NilError(t, err) + assert.Check(t, is.Equal(db1ID, inspect.ID)) } diff --git a/vendor/github.com/docker/docker/integration/container/resize_test.go b/vendor/github.com/docker/docker/integration/container/resize_test.go index 18438ea82..149ac3afd 100644 --- a/vendor/github.com/docker/docker/integration/container/resize_test.go +++ 
b/vendor/github.com/docker/docker/integration/container/resize_test.go @@ -11,9 +11,9 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestResize(t *testing.T) { @@ -29,7 +29,7 @@ func TestResize(t *testing.T) { Height: 40, Width: 40, }) - require.NoError(t, err) + assert.NilError(t, err) } func TestResizeWithInvalidSize(t *testing.T) { @@ -43,8 +43,8 @@ func TestResizeWithInvalidSize(t *testing.T) { endpoint := "/containers/" + cID + "/resize?h=foo&w=bar" res, _, err := req.Post(endpoint) - require.NoError(t, err) - assert.Equal(t, http.StatusBadRequest, res.StatusCode) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(http.StatusBadRequest, res.StatusCode)) } func TestResizeWhenContainerNotStarted(t *testing.T) { diff --git a/vendor/github.com/docker/docker/integration/container/stats_test.go b/vendor/github.com/docker/docker/integration/container/stats_test.go index 9c0b94849..d10808f8f 100644 --- a/vendor/github.com/docker/docker/integration/container/stats_test.go +++ b/vendor/github.com/docker/docker/integration/container/stats_test.go @@ -10,10 +10,10 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestStats(t *testing.T) { @@ -24,20 +24,20 @@ func TestStats(t *testing.T) { ctx := 
context.Background() info, err := client.Info(ctx) - require.NoError(t, err) + assert.NilError(t, err) cID := container.Run(t, ctx, client) poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond)) resp, err := client.ContainerStats(ctx, cID, false) - require.NoError(t, err) + assert.NilError(t, err) defer resp.Body.Close() var v *types.Stats err = json.NewDecoder(resp.Body).Decode(&v) - require.NoError(t, err) - assert.Equal(t, int64(v.MemoryStats.Limit), info.MemTotal) + assert.NilError(t, err) + assert.Check(t, is.Equal(int64(v.MemoryStats.Limit), info.MemTotal)) err = json.NewDecoder(resp.Body).Decode(&v) - require.Error(t, err, io.EOF) + assert.Assert(t, is.ErrorContains(err, ""), io.EOF) } diff --git a/vendor/github.com/docker/docker/integration/container/stop_test.go b/vendor/github.com/docker/docker/integration/container/stop_test.go index 4ecd06dd2..04aec2159 100644 --- a/vendor/github.com/docker/docker/integration/container/stop_test.go +++ b/vendor/github.com/docker/docker/integration/container/stop_test.go @@ -10,10 +10,10 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/icmd" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/require" ) func TestStopContainerWithRestartPolicyAlways(t *testing.T) { @@ -21,7 +21,7 @@ func TestStopContainerWithRestartPolicyAlways(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - names := []string{"verifyRestart1", "verifyRestart2"} + names := []string{"verifyRestart1-" + t.Name(), "verifyRestart2-" + t.Name()} for _, name := range names { container.Run(t, ctx, client, container.WithName(name), container.WithCmd("false"), func(c *container.TestContainerConfig) { 
c.HostConfig.RestartPolicy.Name = "always" @@ -34,7 +34,7 @@ func TestStopContainerWithRestartPolicyAlways(t *testing.T) { for _, name := range names { err := client.ContainerStop(ctx, name, nil) - require.NoError(t, err) + assert.NilError(t, err) } for _, name := range names { @@ -49,12 +49,12 @@ func TestDeleteDevicemapper(t *testing.T) { client := request.NewAPIClient(t) ctx := context.Background() - id := container.Run(t, ctx, client, container.WithName("foo"), container.WithCmd("echo")) + id := container.Run(t, ctx, client, container.WithName("foo-"+t.Name()), container.WithCmd("echo")) poll.WaitOn(t, container.IsStopped(ctx, client, id), poll.WithDelay(100*time.Millisecond)) inspect, err := client.ContainerInspect(ctx, id) - require.NoError(t, err) + assert.NilError(t, err) deviceID := inspect.GraphDriver.Data["DeviceId"] @@ -67,5 +67,5 @@ func TestDeleteDevicemapper(t *testing.T) { result.Assert(t, icmd.Success) err = client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{}) - require.NoError(t, err) + assert.NilError(t, err) } diff --git a/vendor/github.com/docker/docker/integration/container/update_linux_test.go b/vendor/github.com/docker/docker/integration/container/update_linux_test.go index c898dc1d3..a08417ea2 100644 --- a/vendor/github.com/docker/docker/integration/container/update_linux_test.go +++ b/vendor/github.com/docker/docker/integration/container/update_linux_test.go @@ -10,10 +10,10 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestUpdateMemory(t *testing.T) { @@ -44,26 +44,26 @@ func TestUpdateMemory(t 
*testing.T) { MemorySwap: setMemorySwap, }, }) - require.NoError(t, err) + assert.NilError(t, err) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, setMemory, inspect.HostConfig.Memory) - assert.Equal(t, setMemorySwap, inspect.HostConfig.MemorySwap) + assert.NilError(t, err) + assert.Check(t, is.Equal(setMemory, inspect.HostConfig.Memory)) + assert.Check(t, is.Equal(setMemorySwap, inspect.HostConfig.MemorySwap)) res, err := container.Exec(ctx, client, cID, []string{"cat", "/sys/fs/cgroup/memory/memory.limit_in_bytes"}) - require.NoError(t, err) - require.Empty(t, res.Stderr()) - require.Equal(t, 0, res.ExitCode) - assert.Equal(t, strconv.FormatInt(setMemory, 10), strings.TrimSpace(res.Stdout())) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) + assert.Check(t, is.Equal(strconv.FormatInt(setMemory, 10), strings.TrimSpace(res.Stdout()))) res, err = container.Exec(ctx, client, cID, []string{"cat", "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes"}) - require.NoError(t, err) - require.Empty(t, res.Stderr()) - require.Equal(t, 0, res.ExitCode) - assert.Equal(t, strconv.FormatInt(setMemorySwap, 10), strings.TrimSpace(res.Stdout())) + assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) + assert.Check(t, is.Equal(strconv.FormatInt(setMemorySwap, 10), strings.TrimSpace(res.Stdout()))) } func TestUpdateCPUQuota(t *testing.T) { @@ -93,15 +93,15 @@ func TestUpdateCPUQuota(t *testing.T) { } inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, test.update, inspect.HostConfig.CPUQuota) + assert.NilError(t, err) + assert.Check(t, is.Equal(test.update, inspect.HostConfig.CPUQuota)) res, err := container.Exec(ctx, client, cID, []string{"/bin/cat", "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"}) - require.NoError(t, err) - require.Empty(t, res.Stderr()) - require.Equal(t, 0, res.ExitCode) + 
assert.NilError(t, err) + assert.Assert(t, is.Len(res.Stderr(), 0)) + assert.Equal(t, 0, res.ExitCode) - assert.Equal(t, strconv.FormatInt(test.update, 10), strings.TrimSpace(res.Stdout())) + assert.Check(t, is.Equal(strconv.FormatInt(test.update, 10), strings.TrimSpace(res.Stdout()))) } } diff --git a/vendor/github.com/docker/docker/integration/container/update_test.go b/vendor/github.com/docker/docker/integration/container/update_test.go index 651e84cb2..03dcc635c 100644 --- a/vendor/github.com/docker/docker/integration/container/update_test.go +++ b/vendor/github.com/docker/docker/integration/container/update_test.go @@ -9,9 +9,9 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestUpdateRestartPolicy(t *testing.T) { @@ -32,7 +32,7 @@ func TestUpdateRestartPolicy(t *testing.T) { MaximumRetryCount: 5, }, }) - require.NoError(t, err) + assert.NilError(t, err) timeout := 60 * time.Second if testEnv.OSType == "windows" { @@ -42,9 +42,9 @@ func TestUpdateRestartPolicy(t *testing.T) { poll.WaitOn(t, container.IsInState(ctx, client, cID, "exited"), poll.WithDelay(100*time.Millisecond), poll.WithTimeout(timeout)) inspect, err := client.ContainerInspect(ctx, cID) - require.NoError(t, err) - assert.Equal(t, inspect.RestartCount, 5) - assert.Equal(t, inspect.HostConfig.RestartPolicy.MaximumRetryCount, 5) + assert.NilError(t, err) + assert.Check(t, is.Equal(inspect.RestartCount, 5)) + assert.Check(t, is.Equal(inspect.HostConfig.RestartPolicy.MaximumRetryCount, 5)) } func TestUpdateRestartWithAutoRemove(t *testing.T) { diff --git a/vendor/github.com/docker/docker/integration/image/commit_test.go 
b/vendor/github.com/docker/docker/integration/image/commit_test.go index 39fc956db..e13719b53 100644 --- a/vendor/github.com/docker/docker/integration/image/commit_test.go +++ b/vendor/github.com/docker/docker/integration/image/commit_test.go @@ -7,8 +7,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestCommitInheritsEnv(t *testing.T) { @@ -22,13 +22,13 @@ func TestCommitInheritsEnv(t *testing.T) { Changes: []string{"ENV PATH=/bin"}, Reference: "test-commit-image", }) - require.NoError(t, err) + assert.NilError(t, err) image1, _, err := client.ImageInspectWithRaw(ctx, commitResp1.ID) - require.NoError(t, err) + assert.NilError(t, err) expectedEnv1 := []string{"PATH=/bin"} - assert.Equal(t, expectedEnv1, image1.Config.Env) + assert.Check(t, is.DeepEqual(expectedEnv1, image1.Config.Env)) cID2 := container.Create(t, ctx, client, container.WithImage(image1.ID)) @@ -36,10 +36,10 @@ func TestCommitInheritsEnv(t *testing.T) { Changes: []string{"ENV PATH=/usr/bin:$PATH"}, Reference: "test-commit-image", }) - require.NoError(t, err) + assert.NilError(t, err) image2, _, err := client.ImageInspectWithRaw(ctx, commitResp2.ID) - require.NoError(t, err) + assert.NilError(t, err) expectedEnv2 := []string{"PATH=/usr/bin:/bin"} - assert.Equal(t, expectedEnv2, image2.Config.Env) + assert.Check(t, is.DeepEqual(expectedEnv2, image2.Config.Env)) } diff --git a/vendor/github.com/docker/docker/integration/image/remove_test.go b/vendor/github.com/docker/docker/integration/image/remove_test.go index 825724bd0..c89f6f7a0 100644 --- a/vendor/github.com/docker/docker/integration/image/remove_test.go +++ b/vendor/github.com/docker/docker/integration/image/remove_test.go @@ -8,8 +8,8 
@@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestRemoveImageOrphaning(t *testing.T) { @@ -25,12 +25,12 @@ func TestRemoveImageOrphaning(t *testing.T) { Changes: []string{`ENTRYPOINT ["true"]`}, Reference: img, }) - require.NoError(t, err) + assert.NilError(t, err) // verifies that reference now points to first image resp, _, err := client.ImageInspectWithRaw(ctx, img) - require.NoError(t, err) - assert.Equal(t, resp.ID, commitResp1.ID) + assert.NilError(t, err) + assert.Check(t, is.Equal(resp.ID, commitResp1.ID)) // Create a container from created image, and commit a small change with same reference name cID2 := container.Create(t, ctx, client, container.WithImage(img), container.WithCmd("")) @@ -38,21 +38,21 @@ func TestRemoveImageOrphaning(t *testing.T) { Changes: []string{`LABEL Maintainer="Integration Tests"`}, Reference: img, }) - require.NoError(t, err) + assert.NilError(t, err) // verifies that reference now points to second image resp, _, err = client.ImageInspectWithRaw(ctx, img) - require.NoError(t, err) - assert.Equal(t, resp.ID, commitResp2.ID) + assert.NilError(t, err) + assert.Check(t, is.Equal(resp.ID, commitResp2.ID)) // try to remove the image, should not error out. 
_, err = client.ImageRemove(ctx, img, types.ImageRemoveOptions{}) - require.NoError(t, err) + assert.NilError(t, err) // check if the first image is still there resp, _, err = client.ImageInspectWithRaw(ctx, commitResp1.ID) - require.NoError(t, err) - assert.Equal(t, resp.ID, commitResp1.ID) + assert.NilError(t, err) + assert.Check(t, is.Equal(resp.ID, commitResp1.ID)) // check if the second image has been deleted _, _, err = client.ImageInspectWithRaw(ctx, commitResp2.ID) diff --git a/vendor/github.com/docker/docker/integration/internal/container/container.go b/vendor/github.com/docker/docker/integration/internal/container/container.go index 8d8fe2879..0c7657176 100644 --- a/vendor/github.com/docker/docker/integration/internal/container/container.go +++ b/vendor/github.com/docker/docker/integration/internal/container/container.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) // TestContainerConfig holds container configuration struct that @@ -37,7 +37,7 @@ func Create(t *testing.T, ctx context.Context, client client.APIClient, ops ...f } c, err := client.ContainerCreate(ctx, config.Config, config.HostConfig, config.NetworkingConfig, config.Name) - require.NoError(t, err) + assert.NilError(t, err) return c.ID } @@ -48,7 +48,7 @@ func Run(t *testing.T, ctx context.Context, client client.APIClient, ops ...func id := Create(t, ctx, client, ops...) 
err := client.ContainerStart(ctx, id, types.ContainerStartOptions{}) - require.NoError(t, err) + assert.NilError(t, err) return id } diff --git a/vendor/github.com/docker/docker/integration/internal/request/client.go b/vendor/github.com/docker/docker/integration/internal/request/client.go index 34e589ec8..07dc2e33c 100644 --- a/vendor/github.com/docker/docker/integration/internal/request/client.go +++ b/vendor/github.com/docker/docker/integration/internal/request/client.go @@ -9,14 +9,14 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/internal/test/environment" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) // NewAPIClient returns a docker API client configured from environment variables func NewAPIClient(t *testing.T, ops ...func(*client.Client) error) client.APIClient { ops = append([]func(*client.Client) error{client.FromEnv}, ops...) clt, err := client.NewClientWithOpts(ops...) - require.NoError(t, err) + assert.NilError(t, err) return clt } @@ -27,10 +27,10 @@ func DaemonTime(ctx context.Context, t *testing.T, client client.APIClient, test } info, err := client.Info(ctx) - require.NoError(t, err) + assert.NilError(t, err) dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) - require.NoError(t, err, "invalid time format in GET /info response") + assert.NilError(t, err, "invalid time format in GET /info response") return dt } diff --git a/vendor/github.com/docker/docker/integration/internal/swarm/service.go b/vendor/github.com/docker/docker/integration/internal/swarm/service.go index a46b02e14..79705961a 100644 --- a/vendor/github.com/docker/docker/integration/internal/swarm/service.go +++ b/vendor/github.com/docker/docker/integration/internal/swarm/service.go @@ -11,7 +11,8 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/internal/test/environment" - "github.com/stretchr/testify/require" + 
"github.com/gotestyourself/gotestyourself/assert" + "github.com/gotestyourself/gotestyourself/skip" ) const ( @@ -21,6 +22,7 @@ const ( // NewSwarm creates a swarm daemon for testing func NewSwarm(t *testing.T, testEnv *environment.Execution) *daemon.Swarm { + skip.IfCondition(t, testEnv.IsRemoteDaemon()) d := &daemon.Swarm{ Daemon: daemon.New(t, "", dockerdBinary, daemon.Config{ Experimental: testEnv.DaemonInfo.ExperimentalBuild, @@ -35,7 +37,7 @@ func NewSwarm(t *testing.T, testEnv *environment.Execution) *daemon.Swarm { args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} d.StartWithBusybox(t, args...) - require.NoError(t, d.Init(swarmtypes.InitRequest{})) + assert.NilError(t, d.Init(swarmtypes.InitRequest{})) return d } @@ -52,7 +54,7 @@ func CreateService(t *testing.T, d *daemon.Swarm, opts ...ServiceSpecOpt) string client := GetClient(t, d) resp, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{}) - require.NoError(t, err, "error creating service") + assert.NilError(t, err, "error creating service") return resp.ID } @@ -126,7 +128,7 @@ func GetRunningTasks(t *testing.T, d *daemon.Swarm, serviceID string) []swarmtyp Filters: filterArgs, } tasks, err := client.TaskList(context.Background(), options) - require.NoError(t, err) + assert.NilError(t, err) return tasks } @@ -136,11 +138,11 @@ func ExecTask(t *testing.T, d *daemon.Swarm, task swarmtypes.Task, config types. 
ctx := context.Background() resp, err := client.ContainerExecCreate(ctx, task.Status.ContainerStatus.ContainerID, config) - require.NoError(t, err, "error creating exec") + assert.NilError(t, err, "error creating exec") startCheck := types.ExecStartCheck{} attach, err := client.ContainerExecAttach(ctx, resp.ID, startCheck) - require.NoError(t, err, "error attaching to exec") + assert.NilError(t, err, "error attaching to exec") return attach } @@ -153,6 +155,6 @@ func ensureContainerSpec(spec *swarmtypes.ServiceSpec) { // GetClient creates a new client for the passed in swarm daemon. func GetClient(t *testing.T, d *daemon.Swarm) client.APIClient { client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) return client } diff --git a/vendor/github.com/docker/docker/integration/network/delete_test.go b/vendor/github.com/docker/docker/integration/network/delete_test.go index 0877d8bc8..e2af49de7 100644 --- a/vendor/github.com/docker/docker/integration/network/delete_test.go +++ b/vendor/github.com/docker/docker/integration/network/delete_test.go @@ -6,8 +6,8 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func containsNetwork(nws []types.NetworkResource, nw types.NetworkCreateResponse) bool { @@ -29,18 +29,18 @@ func createAmbiguousNetworks(t *testing.T) (types.NetworkCreateResponse, types.N ctx := context.Background() testNet, err := client.NetworkCreate(ctx, "testNet", types.NetworkCreate{}) - require.NoError(t, err) + assert.NilError(t, err) idPrefixNet, err := client.NetworkCreate(ctx, testNet.ID[:12], types.NetworkCreate{}) - require.NoError(t, err) + assert.NilError(t, err) fullIDNet, err := client.NetworkCreate(ctx, testNet.ID, types.NetworkCreate{}) - 
require.NoError(t, err) + assert.NilError(t, err) nws, err := client.NetworkList(ctx, types.NetworkListOptions{}) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, true, containsNetwork(nws, testNet), "failed to create network testNet") - assert.Equal(t, true, containsNetwork(nws, idPrefixNet), "failed to create network idPrefixNet") - assert.Equal(t, true, containsNetwork(nws, fullIDNet), "failed to create network fullIDNet") + assert.Check(t, is.Equal(true, containsNetwork(nws, testNet)), "failed to create network testNet") + assert.Check(t, is.Equal(true, containsNetwork(nws, idPrefixNet)), "failed to create network idPrefixNet") + assert.Check(t, is.Equal(true, containsNetwork(nws, fullIDNet)), "failed to create network fullIDNet") return testNet, idPrefixNet, fullIDNet } @@ -56,17 +56,17 @@ func TestDockerNetworkDeletePreferID(t *testing.T) { // Delete the network using a prefix of the first network's ID as name. // This should the network name with the id-prefix, not the original network. err := client.NetworkRemove(ctx, testNet.ID[:12]) - require.NoError(t, err) + assert.NilError(t, err) // Delete the network using networkID. 
This should remove the original // network, not the network with the name equal to the networkID err = client.NetworkRemove(ctx, testNet.ID) - require.NoError(t, err) + assert.NilError(t, err) // networks "testNet" and "idPrefixNet" should be removed, but "fullIDNet" should still exist nws, err := client.NetworkList(ctx, types.NetworkListOptions{}) - require.NoError(t, err) - assert.Equal(t, false, containsNetwork(nws, testNet), "Network testNet not removed") - assert.Equal(t, false, containsNetwork(nws, idPrefixNet), "Network idPrefixNet not removed") - assert.Equal(t, true, containsNetwork(nws, fullIDNet), "Network fullIDNet not found") + assert.NilError(t, err) + assert.Check(t, is.Equal(false, containsNetwork(nws, testNet)), "Network testNet not removed") + assert.Check(t, is.Equal(false, containsNetwork(nws, idPrefixNet)), "Network idPrefixNet not removed") + assert.Check(t, is.Equal(true, containsNetwork(nws, fullIDNet)), "Network fullIDNet not found") } diff --git a/vendor/github.com/docker/docker/integration/network/inspect_test.go b/vendor/github.com/docker/docker/integration/network/inspect_test.go index df586224a..ad4a344c4 100644 --- a/vendor/github.com/docker/docker/integration/network/inspect_test.go +++ b/vendor/github.com/docker/docker/integration/network/inspect_test.go @@ -1,18 +1,17 @@ package network // import "github.com/docker/docker/integration/network" import ( - "fmt" "runtime" "testing" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" + swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" - "github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration/internal/swarm" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -21,10 +20,10 @@ const dockerdBinary = "dockerd" func 
TestInspectNetwork(t *testing.T) { defer setupTest(t)() - d := newSwarm(t) + d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) overlayName := "overlay1" networkCreate := types.NetworkCreate{ @@ -33,18 +32,19 @@ func TestInspectNetwork(t *testing.T) { } netResp, err := client.NetworkCreate(context.Background(), overlayName, networkCreate) - require.NoError(t, err) + assert.NilError(t, err) overlayID := netResp.ID var instances uint64 = 4 serviceName := "TestService" + // FIXME(vdemeester) consolidate with swarm.CreateService serviceSpec := swarmServiceSpec(serviceName, instances) - serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: overlayName}) + serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarmtypes.NetworkAttachmentConfig{Target: overlayName}) serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) pollSettings := func(config *poll.Settings) { if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { @@ -57,32 +57,32 @@ func TestInspectNetwork(t *testing.T) { poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), pollSettings) _, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) - require.NoError(t, err) + assert.NilError(t, err) // Test inspect verbose with full NetworkID networkVerbose, err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{ Verbose: true, }) - require.NoError(t, err) - require.True(t, validNetworkVerbose(networkVerbose, serviceName, instances)) + assert.NilError(t, err) + assert.Assert(t, validNetworkVerbose(networkVerbose, serviceName, instances)) // Test inspect verbose with partial NetworkID 
networkVerbose, err = client.NetworkInspect(context.Background(), overlayID[0:11], types.NetworkInspectOptions{ Verbose: true, }) - require.NoError(t, err) - require.True(t, validNetworkVerbose(networkVerbose, serviceName, instances)) + assert.NilError(t, err) + assert.Assert(t, validNetworkVerbose(networkVerbose, serviceName, instances)) // Test inspect verbose with Network name and swarm scope networkVerbose, err = client.NetworkInspect(context.Background(), overlayName, types.NetworkInspectOptions{ Verbose: true, Scope: "swarm", }) - require.NoError(t, err) - require.True(t, validNetworkVerbose(networkVerbose, serviceName, instances)) + assert.NilError(t, err) + assert.Assert(t, validNetworkVerbose(networkVerbose, serviceName, instances)) err = client.ServiceRemove(context.Background(), serviceID) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings) poll.WaitOn(t, noTasks(client), pollSettings) @@ -90,55 +90,36 @@ func TestInspectNetwork(t *testing.T) { serviceResp, err = client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) serviceID2 := serviceResp.ID poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), pollSettings) err = client.ServiceRemove(context.Background(), serviceID2) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceID2), pollSettings) poll.WaitOn(t, noTasks(client), pollSettings) err = client.NetworkRemove(context.Background(), overlayID) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, networkIsRemoved(client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } -func newSwarm(t *testing.T) *daemon.Swarm { - d := &daemon.Swarm{ - Daemon: daemon.New(t, "", dockerdBinary, daemon.Config{ - Experimental: testEnv.DaemonInfo.ExperimentalBuild, - }), - // TODO: better method 
of finding an unused port - Port: defaultSwarmPort, - } - // TODO: move to a NewSwarm constructor - d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port) - - // avoid networking conflicts - args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} - d.StartWithBusybox(t, args...) - - require.NoError(t, d.Init(swarm.InitRequest{})) - return d -} - -func swarmServiceSpec(name string, replicas uint64) swarm.ServiceSpec { - return swarm.ServiceSpec{ - Annotations: swarm.Annotations{ +func swarmServiceSpec(name string, replicas uint64) swarmtypes.ServiceSpec { + return swarmtypes.ServiceSpec{ + Annotations: swarmtypes.Annotations{ Name: name, }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: &swarm.ContainerSpec{ + TaskTemplate: swarmtypes.TaskSpec{ + ContainerSpec: &swarmtypes.ContainerSpec{ Image: "busybox:latest", Command: []string{"/bin/top"}, }, }, - Mode: swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ + Mode: swarmtypes.ServiceMode{ + Replicated: &swarmtypes.ReplicatedService{ Replicas: &replicas, }, }, @@ -157,7 +138,7 @@ func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, return poll.Error(err) case len(tasks) == int(instances): for _, task := range tasks { - if task.Status.State != swarm.TaskStateRunning { + if task.Status.State != swarmtypes.TaskStateRunning { return poll.Continue("waiting for tasks to enter run state") } } diff --git a/vendor/github.com/docker/docker/integration/network/service_test.go b/vendor/github.com/docker/docker/integration/network/service_test.go index 684b29c1c..ea7391180 100644 --- a/vendor/github.com/docker/docker/integration/network/service_test.go +++ b/vendor/github.com/docker/docker/integration/network/service_test.go @@ -6,36 +6,39 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" + swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" + 
"github.com/docker/docker/integration/internal/swarm" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) func TestServiceWithPredefinedNetwork(t *testing.T) { defer setupTest(t)() - d := newSwarm(t) + d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) hostName := "host" var instances uint64 = 1 serviceName := "TestService" serviceSpec := swarmServiceSpec(serviceName, instances) - serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: hostName}) + serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarmtypes.NetworkAttachmentConfig{Target: hostName}) serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) pollSettings := func(config *poll.Settings) { if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { config.Timeout = 50 * time.Second config.Delay = 100 * time.Millisecond + } else { + config.Timeout = 30 * time.Second + config.Delay = 100 * time.Millisecond } } @@ -43,20 +46,80 @@ func TestServiceWithPredefinedNetwork(t *testing.T) { poll.WaitOn(t, serviceRunningCount(client, serviceID, instances), pollSettings) _, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) - require.NoError(t, err) + assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) - require.NoError(t, err) + assert.NilError(t, err) +} + +const ingressNet = "ingress" + +func TestServiceWithIngressNetwork(t *testing.T) { + defer setupTest(t)() + d := swarm.NewSwarm(t, testEnv) + defer d.Stop(t) + + client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) + 
assert.NilError(t, err) + + pollSettings := func(config *poll.Settings) { + if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { + config.Timeout = 50 * time.Second + config.Delay = 100 * time.Millisecond + } else { + config.Timeout = 30 * time.Second + config.Delay = 100 * time.Millisecond + } + } + + poll.WaitOn(t, swarmIngressReady(client), pollSettings) + + var instances uint64 = 1 + serviceName := "TestIngressService" + serviceSpec := swarmServiceSpec(serviceName, instances) + serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarmtypes.NetworkAttachmentConfig{Target: ingressNet}) + serviceSpec.EndpointSpec = &swarmtypes.EndpointSpec{ + Ports: []swarmtypes.PortConfig{ + { + Protocol: swarmtypes.PortConfigProtocolTCP, + TargetPort: 80, + PublishMode: swarmtypes.PortConfigPublishModeIngress, + }, + }, + } + + serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ + QueryRegistry: false, + }) + assert.NilError(t, err) + + serviceID := serviceResp.ID + poll.WaitOn(t, serviceRunningCount(client, serviceID, instances), pollSettings) + + _, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + assert.NilError(t, err) + + err = client.ServiceRemove(context.Background(), serviceID) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings) - poll.WaitOn(t, noTasks(client), pollSettings) + poll.WaitOn(t, noServices(client), pollSettings) + // Ensure that "ingress" is not removed or corrupted + time.Sleep(10 * time.Second) + netInfo, err := client.NetworkInspect(context.Background(), ingressNet, types.NetworkInspectOptions{ + Verbose: true, + Scope: "swarm", + }) + assert.NilError(t, err, "Ingress network was removed after removing service!") + assert.Assert(t, len(netInfo.Containers) != 0, "No load balancing endpoints in ingress network") + assert.Assert(t, len(netInfo.Peers) != 0, "No peers (including self) 
in ingress network") + _, ok := netInfo.Containers["ingress-sbox"] + assert.Assert(t, ok, "ingress-sbox not present in ingress network") } func serviceRunningCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result { return func(log poll.LogT) poll.Result { - filter := filters.NewArgs() - filter.Add("service", serviceID) services, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) if err != nil { return poll.Error(err) @@ -68,3 +131,39 @@ func serviceRunningCount(client client.ServiceAPIClient, serviceID string, insta return poll.Success() } } + +func swarmIngressReady(client client.NetworkAPIClient) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + netInfo, err := client.NetworkInspect(context.Background(), ingressNet, types.NetworkInspectOptions{ + Verbose: true, + Scope: "swarm", + }) + if err != nil { + return poll.Error(err) + } + np := len(netInfo.Peers) + nc := len(netInfo.Containers) + if np == 0 || nc == 0 { + return poll.Continue("ingress not ready: %d peers and %d containers", nc, np) + } + _, ok := netInfo.Containers["ingress-sbox"] + if !ok { + return poll.Continue("ingress not ready: does not contain the ingress-sbox") + } + return poll.Success() + } +} + +func noServices(client client.ServiceAPIClient) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + services, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + switch { + case err != nil: + return poll.Error(err) + case len(services) == 0: + return poll.Success() + default: + return poll.Continue("Service count at %d waiting for 0", len(services)) + } + } +} diff --git a/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go index 667fc3d3c..5bca6c138 100644 --- a/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go 
+++ b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_test.go @@ -24,8 +24,8 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/internal/test/environment" "github.com/docker/docker/pkg/authorization" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/require" ) const ( @@ -55,15 +55,15 @@ func setupTestV1(t *testing.T) func() { teardown := setupTest(t) err := os.MkdirAll("/etc/docker/plugins", 0755) - require.Nil(t, err) + assert.NilError(t, err) fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) err = ioutil.WriteFile(fileName, []byte(server.URL), 0644) - require.Nil(t, err) + assert.NilError(t, err) return func() { err := os.RemoveAll("/etc/docker/plugins") - require.Nil(t, err) + assert.NilError(t, err) teardown() ctrl = nil @@ -87,7 +87,7 @@ func TestAuthZPluginAllowRequest(t *testing.T) { d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin) client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -98,9 +98,9 @@ func TestAuthZPluginAllowRequest(t *testing.T) { assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", cID)) _, err = client.ServerVersion(ctx) - require.Nil(t, err) - require.Equal(t, 1, ctrl.versionReqCount) - require.Equal(t, 1, ctrl.versionResCount) + assert.NilError(t, err) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 1, ctrl.versionResCount) } func TestAuthZPluginTLS(t *testing.T) { @@ -126,13 +126,13 @@ func TestAuthZPluginTLS(t *testing.T) { ctrl.resRes.Allow = true client, err := newTLSAPIClient(testDaemonHTTPSAddr, cacertPath, clientCertPath, clientKeyPath) - require.Nil(t, err) + assert.NilError(t, err) _, err = client.ServerVersion(context.Background()) - require.Nil(t, err) + assert.NilError(t, err) - require.Equal(t, "client", ctrl.reqUser) - require.Equal(t, "client", 
ctrl.resUser) + assert.Equal(t, "client", ctrl.reqUser) + assert.Equal(t, "client", ctrl.resUser) } func newTLSAPIClient(host, cacertPath, certPath, keyPath string) (client.APIClient, error) { @@ -153,16 +153,16 @@ func TestAuthZPluginDenyRequest(t *testing.T) { ctrl.reqRes.Msg = unauthorizedMessage client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) // Ensure command is blocked _, err = client.ServerVersion(context.Background()) - require.NotNil(t, err) - require.Equal(t, 1, ctrl.versionReqCount) - require.Equal(t, 0, ctrl.versionResCount) + assert.Assert(t, err != nil) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 0, ctrl.versionResCount) // Ensure unauthorized message appears in response - require.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error()) + assert.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error()) } // TestAuthZPluginAPIDenyResponse validates that when authorization @@ -174,17 +174,17 @@ func TestAuthZPluginAPIDenyResponse(t *testing.T) { ctrl.resRes.Msg = unauthorizedMessage daemonURL, err := url.Parse(d.Sock()) - require.Nil(t, err) + assert.NilError(t, err) conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) - require.Nil(t, err) + assert.NilError(t, err) client := httputil.NewClientConn(conn, nil) req, err := http.NewRequest("GET", "/version", nil) - require.Nil(t, err) + assert.NilError(t, err) resp, err := client.Do(req) - require.Nil(t, err) - require.Equal(t, http.StatusForbidden, resp.StatusCode) + assert.NilError(t, err) + assert.DeepEqual(t, http.StatusForbidden, resp.StatusCode) } func TestAuthZPluginDenyResponse(t *testing.T) { @@ -195,16 +195,16 @@ func TestAuthZPluginDenyResponse(t *testing.T) { ctrl.resRes.Msg = unauthorizedMessage client, err := d.NewClient() - require.Nil(t, err) + 
assert.NilError(t, err) // Ensure command is blocked _, err = client.ServerVersion(context.Background()) - require.NotNil(t, err) - require.Equal(t, 1, ctrl.versionReqCount) - require.Equal(t, 1, ctrl.versionResCount) + assert.Assert(t, err != nil) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 1, ctrl.versionResCount) // Ensure unauthorized message appears in response - require.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error()) + assert.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error()) } // TestAuthZPluginAllowEventStream verifies event stream propagates @@ -218,7 +218,7 @@ func TestAuthZPluginAllowEventStream(t *testing.T) { d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin) client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -231,7 +231,7 @@ func TestAuthZPluginAllowEventStream(t *testing.T) { for i := 0; i < 100; i++ { c, err := client.ContainerInspect(ctx, cID) - require.Nil(t, err) + assert.NilError(t, err) if c.State.Running { break } @@ -258,7 +258,7 @@ func TestAuthZPluginAllowEventStream(t *testing.T) { if err == io.EOF { t.Fatal("premature end of event stream") } - require.Nil(t, err) + assert.NilError(t, err) case <-time.After(30 * time.Second): // Fail the test t.Fatal("event stream timeout") @@ -279,10 +279,10 @@ func systemTime(t *testing.T, client client.APIClient, testEnv *environment.Exec ctx := context.Background() info, err := client.Info(ctx) - require.Nil(t, err) + assert.NilError(t, err) dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) - require.Nil(t, err, "invalid time format in GET /info response") + assert.NilError(t, err, "invalid time format in GET /info response") return dt } @@ -303,12 +303,12 @@ func TestAuthZPluginErrorResponse(t *testing.T) { ctrl.resRes.Err = 
errorMessage client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) // Ensure command is blocked _, err = client.ServerVersion(context.Background()) - require.NotNil(t, err) - require.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage), err.Error()) + assert.Assert(t, err != nil) + assert.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage), err.Error()) } func TestAuthZPluginErrorRequest(t *testing.T) { @@ -317,12 +317,12 @@ func TestAuthZPluginErrorRequest(t *testing.T) { ctrl.reqRes.Err = errorMessage client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) // Ensure command is blocked _, err = client.ServerVersion(context.Background()) - require.NotNil(t, err) - require.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage), err.Error()) + assert.Assert(t, err != nil) + assert.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage), err.Error()) } func TestAuthZPluginEnsureNoDuplicatePluginRegistration(t *testing.T) { @@ -333,14 +333,14 @@ func TestAuthZPluginEnsureNoDuplicatePluginRegistration(t *testing.T) { ctrl.resRes.Allow = true client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) _, err = client.ServerVersion(context.Background()) - require.Nil(t, err) + assert.NilError(t, err) // assert plugin is only called once.. 
- require.Equal(t, 1, ctrl.versionReqCount) - require.Equal(t, 1, ctrl.versionResCount) + assert.Equal(t, 1, ctrl.versionReqCount) + assert.Equal(t, 1, ctrl.versionResCount) } func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) { @@ -350,36 +350,36 @@ func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) { d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin) client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) ctx := context.Background() tmp, err := ioutil.TempDir("", "test-authz-load-import") - require.Nil(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmp) savedImagePath := filepath.Join(tmp, "save.tar") err = imageSave(client, savedImagePath, "busybox") - require.Nil(t, err) + assert.NilError(t, err) err = imageLoad(client, savedImagePath) - require.Nil(t, err) + assert.NilError(t, err) exportedImagePath := filepath.Join(tmp, "export.tar") cID := container.Run(t, ctx, client) responseReader, err := client.ContainerExport(context.Background(), cID) - require.Nil(t, err) + assert.NilError(t, err) defer responseReader.Close() file, err := os.Create(exportedImagePath) - require.Nil(t, err) + assert.NilError(t, err) defer file.Close() _, err = io.Copy(file, responseReader) - require.Nil(t, err) + assert.NilError(t, err) err = imageImport(client, exportedImagePath) - require.Nil(t, err) + assert.NilError(t, err) } func imageSave(client client.APIClient, path, image string) error { @@ -442,16 +442,16 @@ func TestAuthZPluginHeader(t *testing.T) { d.StartWithBusybox(t, "--debug", "--authorization-plugin="+testAuthZPlugin) daemonURL, err := url.Parse(d.Sock()) - require.Nil(t, err) + assert.NilError(t, err) conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) - require.Nil(t, err) + assert.NilError(t, err) client := httputil.NewClientConn(conn, nil) req, err := http.NewRequest("GET", "/version", nil) - require.Nil(t, err) + assert.NilError(t, err) resp, 
err := client.Do(req) - require.Nil(t, err) - require.Equal(t, "application/json", resp.Header["Content-Type"][0]) + assert.NilError(t, err) + assert.Equal(t, "application/json", resp.Header["Content-Type"][0]) } // assertURIRecorded verifies that the given URI was sent and recorded diff --git a/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go index 5efa421e8..fa3d37dc8 100644 --- a/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go +++ b/vendor/github.com/docker/docker/integration/plugin/authz/authz_plugin_v2_test.go @@ -16,8 +16,8 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/requirement" + "github.com/gotestyourself/gotestyourself/assert" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/require" ) var ( @@ -44,13 +44,13 @@ func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) { defer setupTestV2(t)() client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) ctx := context.Background() // Install authz plugin err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag) - require.Nil(t, err) + assert.NilError(t, err) // start the daemon with the plugin and load busybox, --net=none build fails otherwise // because it needs to pull busybox d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag) @@ -60,7 +60,7 @@ func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) { cID := container.Run(t, ctx, client) _, err = client.ContainerInspect(ctx, cID) - require.Nil(t, err) + assert.NilError(t, err) } func TestAuthZPluginV2Disable(t *testing.T) { @@ -68,26 +68,26 @@ func TestAuthZPluginV2Disable(t *testing.T) { defer setupTestV2(t)() client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) // Install authz plugin err = 
pluginInstallGrantAllPermissions(client, authzPluginNameWithTag) - require.Nil(t, err) + assert.NilError(t, err) d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag) d.LoadBusybox(t) _, err = client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{Driver: "local"}) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) // disable the plugin err = client.PluginDisable(context.Background(), authzPluginNameWithTag, types.PluginDisableOptions{}) - require.Nil(t, err) + assert.NilError(t, err) // now test to see if the docker api works. _, err = client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{Driver: "local"}) - require.Nil(t, err) + assert.NilError(t, err) } func TestAuthZPluginV2RejectVolumeRequests(t *testing.T) { @@ -95,35 +95,35 @@ func TestAuthZPluginV2RejectVolumeRequests(t *testing.T) { defer setupTestV2(t)() client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) // Install authz plugin err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag) - require.Nil(t, err) + assert.NilError(t, err) // restart the daemon with the plugin d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag) _, err = client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{Driver: "local"}) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) _, err = client.VolumeList(context.Background(), 
filters.Args{}) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) // The plugin will block the command before it can determine the volume does not exist err = client.VolumeRemove(context.Background(), "test", false) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) _, err = client.VolumeInspect(context.Background(), "test") - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) _, err = client.VolumesPrune(context.Background(), filters.Args{}) - require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) + assert.Assert(t, err != nil) + assert.Assert(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))) } func TestAuthZPluginV2BadManifestFailsDaemonStart(t *testing.T) { @@ -131,15 +131,15 @@ func TestAuthZPluginV2BadManifestFailsDaemonStart(t *testing.T) { defer setupTestV2(t)() client, err := d.NewClient() - require.Nil(t, err) + assert.NilError(t, err) // Install authz plugin with bad manifest err = 
pluginInstallGrantAllPermissions(client, authzPluginBadManifestName) - require.Nil(t, err) + assert.NilError(t, err) // start the daemon with the plugin, it will error err = d.RestartWithError("--authorization-plugin=" + authzPluginBadManifestName) - require.NotNil(t, err) + assert.Assert(t, err != nil) // restarting the daemon without requiring the plugin will succeed d.Start(t) @@ -150,7 +150,7 @@ func TestAuthZPluginV2NonexistentFailsDaemonStart(t *testing.T) { // start the daemon with a non-existent authz plugin, it will error err := d.RestartWithError("--authorization-plugin=" + nonexistentAuthzPluginName) - require.NotNil(t, err) + assert.Assert(t, err != nil) // restarting the daemon without requiring the plugin will succeed d.Start(t) diff --git a/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go b/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go index ea72a03f1..3d5f406b9 100644 --- a/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go +++ b/vendor/github.com/docker/docker/integration/plugin/authz/main_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/internal/test/environment" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/pkg/plugins" + "github.com/gotestyourself/gotestyourself/skip" ) var ( @@ -48,6 +49,7 @@ func TestMain(m *testing.M) { } func setupTest(t *testing.T) func() { + skip.IfCondition(t, testEnv.IsRemoteDaemon(), "cannot run daemon when remote daemon") environment.ProtectAll(t, testEnv) d = daemon.New(t, "", dockerdBinary, daemon.Config{ diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go index 61b2f35f4..371d43d83 100644 --- a/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go +++ b/vendor/github.com/docker/docker/integration/plugin/logging/helpers_test.go @@ -14,8 +14,6 @@ import ( "github.com/pkg/errors" ) -const 
dockerdBinary = "dockerd" - var pluginBuildLock = locker.New() func ensurePlugin(t *testing.T, name string) string { diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/main_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/main_test.go new file mode 100644 index 000000000..b2446ca8c --- /dev/null +++ b/vendor/github.com/docker/docker/integration/plugin/logging/main_test.go @@ -0,0 +1,31 @@ +package logging // import "github.com/docker/docker/integration/plugin/logging" + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var ( + testEnv *environment.Execution +) + +const dockerdBinary = "dockerd" + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = environment.EnsureFrozenImagesLinux(testEnv) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + testEnv.Print() + os.Exit(m.Run()) +} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go b/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go index 660732161..f0d6e9e8b 100644 --- a/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go +++ b/vendor/github.com/docker/docker/integration/plugin/logging/validation_test.go @@ -6,13 +6,15 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/daemon" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + "github.com/gotestyourself/gotestyourself/skip" ) // Regression test for #35553 // Ensure that a daemon with a log plugin set as the default logger for containers // does not keep the daemon from starting. 
func TestDaemonStartWithLogOpt(t *testing.T) { + skip.IfCondition(t, testEnv.IsRemoteDaemon(), "cannot run daemon when remote daemon") t.Parallel() d := daemon.New(t, "", dockerdBinary, daemon.Config{}) @@ -20,12 +22,12 @@ func TestDaemonStartWithLogOpt(t *testing.T) { defer d.Stop(t) client, err := d.NewClient() - assert.NoError(t, err) + assert.Check(t, err) ctx := context.Background() createPlugin(t, client, "test", "dummy", asLogDriver) err = client.PluginEnable(ctx, "test", types.PluginEnableOptions{Timeout: 30}) - assert.NoError(t, err) + assert.Check(t, err) defer client.PluginRemove(ctx, "test", types.PluginRemoveOptions{Force: true}) d.Stop(t) diff --git a/vendor/github.com/docker/docker/integration/secret/secret_test.go b/vendor/github.com/docker/docker/integration/secret/secret_test.go index 27c8fd3d0..4a1e1b3dc 100644 --- a/vendor/github.com/docker/docker/integration/secret/secret_test.go +++ b/vendor/github.com/docker/docker/integration/secret/secret_test.go @@ -13,9 +13,9 @@ import ( "github.com/docker/docker/integration/internal/swarm" "github.com/docker/docker/internal/testutil" "github.com/docker/docker/pkg/stdcopy" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -26,7 +26,7 @@ func TestSecretInspect(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -34,12 +34,12 @@ func TestSecretInspect(t *testing.T) { secretID := createSecret(ctx, t, client, testName, []byte("TESTINGDATA"), nil) secret, _, err := client.SecretInspectWithRaw(context.Background(), secretID) - require.NoError(t, err) - assert.Equal(t, secret.Spec.Name, testName) + assert.NilError(t, err) + 
assert.Check(t, is.Equal(secret.Spec.Name, testName)) secret, _, err = client.SecretInspectWithRaw(context.Background(), testName) - require.NoError(t, err) - assert.Equal(t, secretID, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(secretID, secretID)) } func TestSecretList(t *testing.T) { @@ -49,7 +49,7 @@ func TestSecretList(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -75,8 +75,8 @@ func TestSecretList(t *testing.T) { // test by `secret ls` entries, err := client.SecretList(ctx, types.SecretListOptions{}) - require.NoError(t, err) - assert.Equal(t, names(entries), testNames) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), testNames)) testCases := []struct { filters filters.Args @@ -110,8 +110,8 @@ func TestSecretList(t *testing.T) { entries, err = client.SecretList(ctx, types.SecretListOptions{ Filters: tc.filters, }) - require.NoError(t, err) - assert.Equal(t, names(entries), tc.expected) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(names(entries), tc.expected)) } } @@ -124,8 +124,8 @@ func createSecret(ctx context.Context, t *testing.T, client client.APIClient, na }, Data: data, }) - require.NoError(t, err) - assert.NotEqual(t, secret.ID, "") + assert.NilError(t, err) + assert.Check(t, secret.ID != "") return secret.ID } @@ -136,7 +136,7 @@ func TestSecretsCreateAndDelete(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() @@ -154,7 +154,7 @@ func TestSecretsCreateAndDelete(t *testing.T) { // Ported from original TestSecretsDelete err = client.SecretRemove(ctx, secretID) - require.NoError(t, err) + assert.NilError(t, err) _, _, err = client.SecretInspectWithRaw(ctx, secretID) 
testutil.ErrorContains(t, err, "No such secret") @@ -170,11 +170,11 @@ func TestSecretsCreateAndDelete(t *testing.T) { }) insp, _, err := client.SecretInspectWithRaw(ctx, secretID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Name, testName) - assert.Equal(t, len(insp.Spec.Labels), 2) - assert.Equal(t, insp.Spec.Labels["key1"], "value1") - assert.Equal(t, insp.Spec.Labels["key2"], "value2") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Name, testName)) + assert.Check(t, is.Equal(len(insp.Spec.Labels), 2)) + assert.Check(t, is.Equal(insp.Spec.Labels["key1"], "value1")) + assert.Check(t, is.Equal(insp.Spec.Labels["key2"], "value2")) } func TestSecretsUpdate(t *testing.T) { @@ -184,44 +184,44 @@ func TestSecretsUpdate(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() testName := "test_secret" secretID := createSecret(ctx, t, client, testName, []byte("TESTINGDATA"), nil) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err := client.SecretInspectWithRaw(ctx, secretID) - require.NoError(t, err) - assert.Equal(t, insp.ID, secretID) + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.ID, secretID)) // test UpdateSecret with full ID insp.Spec.Labels = map[string]string{"test": "test1"} err = client.SecretUpdate(ctx, secretID, insp.Version, insp.Spec) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = client.SecretInspectWithRaw(ctx, secretID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Labels["test"], "test1") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test1")) // test UpdateSecret with full name insp.Spec.Labels = map[string]string{"test": "test2"} err = client.SecretUpdate(ctx, testName, insp.Version, insp.Spec) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = 
client.SecretInspectWithRaw(ctx, secretID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Labels["test"], "test2") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test2")) // test UpdateSecret with prefix ID insp.Spec.Labels = map[string]string{"test": "test3"} err = client.SecretUpdate(ctx, secretID[:1], insp.Version, insp.Spec) - require.NoError(t, err) + assert.NilError(t, err) insp, _, err = client.SecretInspectWithRaw(ctx, secretID) - require.NoError(t, err) - assert.Equal(t, insp.Spec.Labels["test"], "test3") + assert.NilError(t, err) + assert.Check(t, is.Equal(insp.Spec.Labels["test"], "test3")) // test UpdateSecret in updating Data which is not supported in daemon // this test will produce an error in func UpdateSecret @@ -244,7 +244,7 @@ func TestTemplatedSecret(t *testing.T) { Data: []byte("this is a secret"), } referencedSecret, err := client.SecretCreate(ctx, referencedSecretSpec) - assert.NoError(t, err) + assert.Check(t, err) referencedConfigSpec := swarmtypes.ConfigSpec{ Annotations: swarmtypes.Annotations{ @@ -253,7 +253,7 @@ func TestTemplatedSecret(t *testing.T) { Data: []byte("this is a config"), } referencedConfig, err := client.ConfigCreate(ctx, referencedConfigSpec) - assert.NoError(t, err) + assert.Check(t, err) secretSpec := swarmtypes.SecretSpec{ Annotations: swarmtypes.Annotations{ @@ -268,7 +268,7 @@ func TestTemplatedSecret(t *testing.T) { } templatedSecret, err := client.SecretCreate(ctx, secretSpec) - assert.NoError(t, err) + assert.Check(t, err) serviceID := swarm.CreateService(t, d, swarm.ServiceWithSecret( @@ -346,8 +346,8 @@ func TestTemplatedSecret(t *testing.T) { func assertAttachedStream(t *testing.T, attach types.HijackedResponse, expect string) { buf := bytes.NewBuffer(nil) _, err := stdcopy.StdCopy(buf, buf, attach.Reader) - require.NoError(t, err) - assert.Contains(t, buf.String(), expect) + assert.NilError(t, err) + assert.Check(t, is.Contains(buf.String(), expect)) } func 
waitAndAssert(t *testing.T, timeout time.Duration, f func(*testing.T) bool) { diff --git a/vendor/github.com/docker/docker/integration/service/create_test.go b/vendor/github.com/docker/docker/integration/service/create_test.go index eb66cfc2f..7170bda49 100644 --- a/vendor/github.com/docker/docker/integration/service/create_test.go +++ b/vendor/github.com/docker/docker/integration/service/create_test.go @@ -11,9 +11,9 @@ import ( swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/swarm" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -22,7 +22,7 @@ func TestCreateServiceMultipleTimes(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) overlayName := "overlay1" networkCreate := types.NetworkCreate{ @@ -31,7 +31,7 @@ func TestCreateServiceMultipleTimes(t *testing.T) { } netResp, err := client.NetworkCreate(context.Background(), overlayName, networkCreate) - require.NoError(t, err) + assert.NilError(t, err) overlayID := netResp.ID var instances uint64 = 4 @@ -41,7 +41,7 @@ func TestCreateServiceMultipleTimes(t *testing.T) { serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) pollSettings := func(config *poll.Settings) { // It takes about ~25s to finish the multi services creation in this case per the pratical observation on arm64/arm platform @@ -55,10 +55,10 @@ func TestCreateServiceMultipleTimes(t *testing.T) { poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), pollSettings) _, _, err = 
client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) - require.NoError(t, err) + assert.NilError(t, err) err = client.ServiceRemove(context.Background(), serviceID) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings) poll.WaitOn(t, noTasks(client), pollSettings) @@ -66,19 +66,19 @@ func TestCreateServiceMultipleTimes(t *testing.T) { serviceResp, err = client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) serviceID2 := serviceResp.ID poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), pollSettings) err = client.ServiceRemove(context.Background(), serviceID2) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceID2), pollSettings) poll.WaitOn(t, noTasks(client), pollSettings) err = client.NetworkRemove(context.Background(), overlayID) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, networkIsRemoved(client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) } @@ -88,7 +88,7 @@ func TestCreateWithDuplicateNetworkNames(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) name := "foo" networkCreate := types.NetworkCreate{ @@ -97,15 +97,15 @@ func TestCreateWithDuplicateNetworkNames(t *testing.T) { } n1, err := client.NetworkCreate(context.Background(), name, networkCreate) - require.NoError(t, err) + assert.NilError(t, err) n2, err := client.NetworkCreate(context.Background(), name, networkCreate) - require.NoError(t, err) + assert.NilError(t, err) // Dupliates with name but with different driver networkCreate.Driver = "overlay" n3, err := client.NetworkCreate(context.Background(), name, networkCreate) - require.NoError(t, 
err) + assert.NilError(t, err) // Create Service with the same name var instances uint64 = 1 @@ -114,30 +114,30 @@ func TestCreateWithDuplicateNetworkNames(t *testing.T) { serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarmtypes.NetworkAttachmentConfig{Target: name}) service, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{}) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceRunningTasksCount(client, service.ID, instances)) resp, _, err := client.ServiceInspectWithRaw(context.Background(), service.ID, types.ServiceInspectOptions{}) - require.NoError(t, err) - assert.Equal(t, n3.ID, resp.Spec.TaskTemplate.Networks[0].Target) + assert.NilError(t, err) + assert.Check(t, is.Equal(n3.ID, resp.Spec.TaskTemplate.Networks[0].Target)) // Remove Service err = client.ServiceRemove(context.Background(), service.ID) - require.NoError(t, err) + assert.NilError(t, err) // Make sure task has been destroyed. poll.WaitOn(t, serviceIsRemoved(client, service.ID)) // Remove networks err = client.NetworkRemove(context.Background(), n3.ID) - require.NoError(t, err) + assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n2.ID) - require.NoError(t, err) + assert.NilError(t, err) err = client.NetworkRemove(context.Background(), n1.ID) - require.NoError(t, err) + assert.NilError(t, err) // Make sure networks have been destroyed. 
poll.WaitOn(t, networkIsRemoved(client, n3.ID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) @@ -150,7 +150,7 @@ func TestCreateServiceSecretFileMode(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() secretResp, err := client.SecretCreate(ctx, swarmtypes.SecretSpec{ @@ -159,7 +159,7 @@ func TestCreateServiceSecretFileMode(t *testing.T) { }, Data: []byte("TESTSECRET"), }) - require.NoError(t, err) + assert.NilError(t, err) var instances uint64 = 1 serviceSpec := swarmtypes.ServiceSpec{ @@ -194,7 +194,7 @@ func TestCreateServiceSecretFileMode(t *testing.T) { serviceResp, err := client.ServiceCreate(ctx, serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceRunningTasksCount(client, serviceResp.ID, instances)) @@ -203,27 +203,27 @@ func TestCreateServiceSecretFileMode(t *testing.T) { tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) - require.NoError(t, err) - assert.Equal(t, len(tasks), 1) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) - require.NoError(t, err) + assert.NilError(t, err) defer body.Close() content, err := ioutil.ReadAll(body) - require.NoError(t, err) - assert.Contains(t, string(content), "-rwxrwxrwx") + assert.NilError(t, err) + assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceResp.ID) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceResp.ID)) poll.WaitOn(t, noTasks(client)) err = client.SecretRemove(ctx, "TestSecret") - require.NoError(t, err) + assert.NilError(t, err) } func 
TestCreateServiceConfigFileMode(t *testing.T) { @@ -231,7 +231,7 @@ func TestCreateServiceConfigFileMode(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() configResp, err := client.ConfigCreate(ctx, swarmtypes.ConfigSpec{ @@ -240,7 +240,7 @@ func TestCreateServiceConfigFileMode(t *testing.T) { }, Data: []byte("TESTCONFIG"), }) - require.NoError(t, err) + assert.NilError(t, err) var instances uint64 = 1 serviceSpec := swarmtypes.ServiceSpec{ @@ -275,7 +275,7 @@ func TestCreateServiceConfigFileMode(t *testing.T) { serviceResp, err := client.ServiceCreate(ctx, serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceRunningTasksCount(client, serviceResp.ID, instances)) @@ -284,27 +284,27 @@ func TestCreateServiceConfigFileMode(t *testing.T) { tasks, err := client.TaskList(ctx, types.TaskListOptions{ Filters: filter, }) - require.NoError(t, err) - assert.Equal(t, len(tasks), 1) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(tasks), 1)) body, err := client.ContainerLogs(ctx, tasks[0].Status.ContainerStatus.ContainerID, types.ContainerLogsOptions{ ShowStdout: true, }) - require.NoError(t, err) + assert.NilError(t, err) defer body.Close() content, err := ioutil.ReadAll(body) - require.NoError(t, err) - assert.Contains(t, string(content), "-rwxrwxrwx") + assert.NilError(t, err) + assert.Check(t, is.Contains(string(content), "-rwxrwxrwx")) err = client.ServiceRemove(ctx, serviceResp.ID) - require.NoError(t, err) + assert.NilError(t, err) poll.WaitOn(t, serviceIsRemoved(client, serviceResp.ID)) poll.WaitOn(t, noTasks(client)) err = client.ConfigRemove(ctx, "TestConfig") - require.NoError(t, err) + assert.NilError(t, err) } func swarmServiceSpec(name string, replicas uint64) swarmtypes.ServiceSpec { diff --git 
a/vendor/github.com/docker/docker/integration/service/inspect_test.go b/vendor/github.com/docker/docker/integration/service/inspect_test.go index 8cd24bc31..d4d342e64 100644 --- a/vendor/github.com/docker/docker/integration/service/inspect_test.go +++ b/vendor/github.com/docker/docker/integration/service/inspect_test.go @@ -10,22 +10,23 @@ import ( swarmtypes "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/swarm" + "github.com/google/go-cmp/cmp" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/poll" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/net/context" ) func TestInspect(t *testing.T) { - skip.IfCondition(t, testEnv.IsRemoteDaemon()) + skip.If(t, testEnv.IsRemoteDaemon()) defer setupTest(t)() d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) - var before = time.Now() + var now = time.Now() var instances uint64 = 2 serviceSpec := fullSwarmServiceSpec("test-service-inspect", instances) @@ -33,18 +34,43 @@ func TestInspect(t *testing.T) { resp, err := client.ServiceCreate(ctx, serviceSpec, types.ServiceCreateOptions{ QueryRegistry: false, }) - require.NoError(t, err) + assert.NilError(t, err) id := resp.ID poll.WaitOn(t, serviceContainerCount(client, id, instances)) service, _, err := client.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{}) - require.NoError(t, err) - assert.Equal(t, serviceSpec, service.Spec) - assert.Equal(t, uint64(11), service.Meta.Version.Index) - assert.Equal(t, id, service.ID) - assert.WithinDuration(t, before, service.CreatedAt, 30*time.Second) - assert.WithinDuration(t, before, service.UpdatedAt, 30*time.Second) + assert.NilError(t, err) + 
+ expected := swarmtypes.Service{ + ID: id, + Spec: serviceSpec, + Meta: swarmtypes.Meta{ + Version: swarmtypes.Version{Index: uint64(11)}, + CreatedAt: now, + UpdatedAt: now, + }, + } + assert.Check(t, is.DeepEqual(service, expected, cmpServiceOpts())) +} + +// TODO: use helpers from gotestyourself/assert/opt when available +func cmpServiceOpts() cmp.Option { + const threshold = 20 * time.Second + + metaTimeFields := func(path cmp.Path) bool { + switch path.String() { + case "Meta.CreatedAt", "Meta.UpdatedAt": + return true + } + return false + } + withinThreshold := cmp.Comparer(func(x, y time.Time) bool { + delta := x.Sub(y) + return delta < threshold && delta > -threshold + }) + + return cmp.FilterPath(metaTimeFields, withinThreshold) } func fullSwarmServiceSpec(name string, replicas uint64) swarmtypes.ServiceSpec { diff --git a/vendor/github.com/docker/docker/integration/service/network_test.go b/vendor/github.com/docker/docker/integration/service/network_test.go index 09b0a1f12..6b8c891cd 100644 --- a/vendor/github.com/docker/docker/integration/service/network_test.go +++ b/vendor/github.com/docker/docker/integration/service/network_test.go @@ -9,8 +9,8 @@ import ( "github.com/docker/docker/client" "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/swarm" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestDockerNetworkConnectAlias(t *testing.T) { @@ -18,7 +18,7 @@ func TestDockerNetworkConnectAlias(t *testing.T) { d := swarm.NewSwarm(t, testEnv) defer d.Stop(t) client, err := client.NewClientWithOpts(client.WithHost((d.Sock()))) - require.NoError(t, err) + assert.NilError(t, err) ctx := context.Background() name := "test-alias" @@ -26,7 +26,7 @@ func TestDockerNetworkConnectAlias(t *testing.T) { Driver: "overlay", Attachable: true, }) - require.NoError(t, 
err) + assert.NilError(t, err) container.Create(t, ctx, client, container.WithName("ng1"), func(c *container.TestContainerConfig) { c.NetworkingConfig = &network.NetworkingConfig{ @@ -41,15 +41,15 @@ func TestDockerNetworkConnectAlias(t *testing.T) { "aaa", }, }) - require.NoError(t, err) + assert.NilError(t, err) err = client.ContainerStart(ctx, "ng1", types.ContainerStartOptions{}) - require.NoError(t, err) + assert.NilError(t, err) ng1, err := client.ContainerInspect(ctx, "ng1") - require.NoError(t, err) - assert.Equal(t, len(ng1.NetworkSettings.Networks[name].Aliases), 2) - assert.Equal(t, ng1.NetworkSettings.Networks[name].Aliases[0], "aaa") + assert.NilError(t, err) + assert.Check(t, is.Equal(len(ng1.NetworkSettings.Networks[name].Aliases), 2)) + assert.Check(t, is.Equal(ng1.NetworkSettings.Networks[name].Aliases[0], "aaa")) container.Create(t, ctx, client, container.WithName("ng2"), func(c *container.TestContainerConfig) { c.NetworkingConfig = &network.NetworkingConfig{ @@ -64,13 +64,13 @@ func TestDockerNetworkConnectAlias(t *testing.T) { "bbb", }, }) - require.NoError(t, err) + assert.NilError(t, err) err = client.ContainerStart(ctx, "ng2", types.ContainerStartOptions{}) - require.NoError(t, err) + assert.NilError(t, err) ng2, err := client.ContainerInspect(ctx, "ng2") - require.NoError(t, err) - assert.Equal(t, len(ng2.NetworkSettings.Networks[name].Aliases), 2) - assert.Equal(t, ng2.NetworkSettings.Networks[name].Aliases[0], "bbb") + assert.NilError(t, err) + assert.Check(t, is.Equal(len(ng2.NetworkSettings.Networks[name].Aliases), 2)) + assert.Check(t, is.Equal(ng2.NetworkSettings.Networks[name].Aliases[0], "bbb")) } diff --git a/vendor/github.com/docker/docker/integration/session/session_test.go b/vendor/github.com/docker/docker/integration/session/session_test.go index 310f54455..de9319436 100644 --- a/vendor/github.com/docker/docker/integration/session/session_test.go +++ b/vendor/github.com/docker/docker/integration/session/session_test.go @@ -5,9 
+5,9 @@ import ( "testing" req "github.com/docker/docker/integration-cli/request" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSessionCreate(t *testing.T) { @@ -20,29 +20,29 @@ func TestSessionCreate(t *testing.T) { r.Header.Set("Upgrade", "h2c") return nil }) - require.NoError(t, err) - require.NoError(t, body.Close()) - assert.Equal(t, res.StatusCode, http.StatusSwitchingProtocols) - assert.Equal(t, res.Header.Get("Upgrade"), "h2c") + assert.NilError(t, err) + assert.NilError(t, body.Close()) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusSwitchingProtocols)) + assert.Check(t, is.Equal(res.Header.Get("Upgrade"), "h2c")) } func TestSessionCreateWithBadUpgrade(t *testing.T) { skip.If(t, !testEnv.DaemonInfo.ExperimentalBuild) res, body, err := req.Post("/session") - require.NoError(t, err) - assert.Equal(t, res.StatusCode, http.StatusBadRequest) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusBadRequest)) buf, err := req.ReadBody(body) - require.NoError(t, err) - assert.Contains(t, string(buf), "no upgrade") + assert.NilError(t, err) + assert.Check(t, is.Contains(string(buf), "no upgrade")) res, body, err = req.Post("/session", func(r *http.Request) error { r.Header.Set("Upgrade", "foo") return nil }) - require.NoError(t, err) - assert.Equal(t, res.StatusCode, http.StatusBadRequest) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusBadRequest)) buf, err = req.ReadBody(body) - require.NoError(t, err) - assert.Contains(t, string(buf), "not supported") + assert.NilError(t, err) + assert.Check(t, is.Contains(string(buf), "not supported")) } diff --git a/vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go 
b/vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go new file mode 100644 index 000000000..c1f864a1c --- /dev/null +++ b/vendor/github.com/docker/docker/integration/system/cgroupdriver_systemd_test.go @@ -0,0 +1,64 @@ +package system + +import ( + "context" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/integration-cli/daemon" + + "github.com/gotestyourself/gotestyourself/assert" +) + +// hasSystemd checks whether the host was booted with systemd as its init +// system. Stolen from +// https://github.com/coreos/go-systemd/blob/176f85496f4e/util/util.go#L68 +func hasSystemd() bool { + fi, err := os.Lstat("/run/systemd/system") + if err != nil { + return false + } + return fi.IsDir() +} + +// TestCgroupDriverSystemdMemoryLimit checks that container +// memory limit can be set when using systemd cgroupdriver. +// https://github.com/moby/moby/issues/35123 +func TestCgroupDriverSystemdMemoryLimit(t *testing.T) { + t.Parallel() + + if !hasSystemd() { + t.Skip("systemd not available") + } + + d := daemon.New(t, "docker", "dockerd", daemon.Config{}) + client, err := d.NewClient() + assert.NilError(t, err) + d.StartWithBusybox(t, "--exec-opt", "native.cgroupdriver=systemd", "--iptables=false") + defer d.Stop(t) + + const mem = 64 * 1024 * 1024 // 64 MB + cfg := container.Config{ + Image: "busybox", + Cmd: []string{"top"}, + } + hostcfg := container.HostConfig{ + Resources: container.Resources{ + Memory: mem, + }, + } + + ctx := context.Background() + ctr, err := client.ContainerCreate(ctx, &cfg, &hostcfg, nil, "") + assert.NilError(t, err) + defer client.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{Force: true}) + + err = client.ContainerStart(ctx, ctr.ID, types.ContainerStartOptions{}) + assert.NilError(t, err) + + s, err := client.ContainerInspect(ctx, ctr.ID) + assert.NilError(t, err) + assert.Equal(t, s.HostConfig.Memory, mem) +} diff --git 
a/vendor/github.com/docker/docker/integration/system/event_test.go b/vendor/github.com/docker/docker/integration/system/event_test.go index 688d7c27d..b270ffcb9 100644 --- a/vendor/github.com/docker/docker/integration/system/event_test.go +++ b/vendor/github.com/docker/docker/integration/system/event_test.go @@ -17,8 +17,8 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/pkg/jsonmessage" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestEvents(t *testing.T) { @@ -33,7 +33,7 @@ func TestEvents(t *testing.T) { Cmd: strslice.StrSlice([]string{"echo", "hello"}), }, ) - require.NoError(t, err) + assert.NilError(t, err) filters := filters.NewArgs( filters.Arg("container", cID), @@ -49,15 +49,15 @@ func TestEvents(t *testing.T) { Tty: false, }, ) - require.NoError(t, err) + assert.NilError(t, err) select { case m := <-msg: - require.Equal(t, m.Type, "container") - require.Equal(t, m.Actor.ID, cID) - require.Equal(t, m.Action, "exec_die") - require.Equal(t, m.Actor.Attributes["execID"], id.ID) - require.Equal(t, m.Actor.Attributes["exitCode"], "0") + assert.Equal(t, m.Type, "container") + assert.Equal(t, m.Actor.ID, cID) + assert.Equal(t, m.Action, "exec_die") + assert.Equal(t, m.Actor.Attributes["execID"], id.ID) + assert.Equal(t, m.Actor.Attributes["exitCode"], "0") case err = <-errors: t.Fatal(err) case <-time.After(time.Second * 3): @@ -84,16 +84,16 @@ func TestEventsBackwardsCompatible(t *testing.T) { // The test here makes sure the response time is less than 3 sec. 
expectedTime := time.Now().Add(3 * time.Second) emptyResp, emptyBody, err := req.Get("/events") - require.NoError(t, err) + assert.NilError(t, err) defer emptyBody.Close() - assert.Equal(t, http.StatusOK, emptyResp.StatusCode) - assert.True(t, time.Now().Before(expectedTime), "timeout waiting for events api to respond, should have responded immediately") + assert.Check(t, is.DeepEqual(http.StatusOK, emptyResp.StatusCode)) + assert.Check(t, time.Now().Before(expectedTime), "timeout waiting for events api to respond, should have responded immediately") // We also test to make sure the `events.Message` is compatible with `JSONMessage` q := url.Values{} q.Set("since", ts) _, body, err := req.Get("/events?" + q.Encode()) - require.NoError(t, err) + assert.NilError(t, err) defer body.Close() dec := json.NewDecoder(body) @@ -112,8 +112,8 @@ func TestEventsBackwardsCompatible(t *testing.T) { } } - assert.NotNil(t, containerCreateEvent) - assert.Equal(t, "create", containerCreateEvent.Status) - assert.Equal(t, cID, containerCreateEvent.ID) - assert.Equal(t, "busybox", containerCreateEvent.From) + assert.Check(t, containerCreateEvent != nil) + assert.Check(t, is.Equal("create", containerCreateEvent.Status)) + assert.Check(t, is.Equal(cID, containerCreateEvent.ID)) + assert.Check(t, is.Equal("busybox", containerCreateEvent.From)) } diff --git a/vendor/github.com/docker/docker/integration/system/info_linux_test.go b/vendor/github.com/docker/docker/integration/system/info_linux_test.go index 8f0271e7a..e8bf70f9b 100644 --- a/vendor/github.com/docker/docker/integration/system/info_linux_test.go +++ b/vendor/github.com/docker/docker/integration/system/info_linux_test.go @@ -8,8 +8,8 @@ import ( req "github.com/docker/docker/integration-cli/request" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -17,35 +17,32 @@ func TestInfoBinaryCommits(t *testing.T) { client := request.NewAPIClient(t) info, err := client.Info(context.Background()) - require.NoError(t, err) + assert.NilError(t, err) - assert.NotNil(t, info.ContainerdCommit) - assert.NotEqual(t, "N/A", info.ContainerdCommit.ID) - assert.Equal(t, testEnv.DaemonInfo.ContainerdCommit.Expected, info.ContainerdCommit.Expected) - assert.Equal(t, info.ContainerdCommit.Expected, info.ContainerdCommit.ID) + assert.Check(t, "N/A" != info.ContainerdCommit.ID) + assert.Check(t, is.Equal(testEnv.DaemonInfo.ContainerdCommit.Expected, info.ContainerdCommit.Expected)) + assert.Check(t, is.Equal(info.ContainerdCommit.Expected, info.ContainerdCommit.ID)) - assert.NotNil(t, info.InitCommit) - assert.NotEqual(t, "N/A", info.InitCommit.ID) - assert.Equal(t, testEnv.DaemonInfo.InitCommit.Expected, info.InitCommit.Expected) - assert.Equal(t, info.InitCommit.Expected, info.InitCommit.ID) + assert.Check(t, "N/A" != info.InitCommit.ID) + assert.Check(t, is.Equal(testEnv.DaemonInfo.InitCommit.Expected, info.InitCommit.Expected)) + assert.Check(t, is.Equal(info.InitCommit.Expected, info.InitCommit.ID)) - assert.NotNil(t, info.RuncCommit) - assert.NotEqual(t, "N/A", info.RuncCommit.ID) - assert.Equal(t, testEnv.DaemonInfo.RuncCommit.Expected, info.RuncCommit.Expected) - assert.Equal(t, info.RuncCommit.Expected, info.RuncCommit.ID) + assert.Check(t, "N/A" != info.RuncCommit.ID) + assert.Check(t, is.Equal(testEnv.DaemonInfo.RuncCommit.Expected, info.RuncCommit.Expected)) + assert.Check(t, is.Equal(info.RuncCommit.Expected, info.RuncCommit.ID)) } func TestInfoAPIVersioned(t *testing.T) { // Windows only supports 1.25 or later res, body, err := req.Get("/v1.20/info") - require.NoError(t, err) - assert.Equal(t, res.StatusCode, http.StatusOK) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(res.StatusCode, http.StatusOK)) b, err := 
req.ReadBody(body) - require.NoError(t, err) + assert.NilError(t, err) out := string(b) - assert.Contains(t, out, "ExecutionDriver") - assert.Contains(t, out, "not supported") + assert.Check(t, is.Contains(out, "ExecutionDriver")) + assert.Check(t, is.Contains(out, "not supported")) } diff --git a/vendor/github.com/docker/docker/integration/system/info_test.go b/vendor/github.com/docker/docker/integration/system/info_test.go index d04fdcdc8..e19ba9ba3 100644 --- a/vendor/github.com/docker/docker/integration/system/info_test.go +++ b/vendor/github.com/docker/docker/integration/system/info_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -14,7 +14,7 @@ func TestInfoAPI(t *testing.T) { client := request.NewAPIClient(t) info, err := client.Info(context.Background()) - require.NoError(t, err) + assert.NilError(t, err) // always shown fields stringsToCheck := []string{ @@ -37,6 +37,6 @@ func TestInfoAPI(t *testing.T) { out := fmt.Sprintf("%+v", info) for _, linePrefix := range stringsToCheck { - assert.Contains(t, out, linePrefix) + assert.Check(t, is.Contains(out, linePrefix)) } } diff --git a/vendor/github.com/docker/docker/integration/system/login_test.go b/vendor/github.com/docker/docker/integration/system/login_test.go index c075109d2..e8ef10c30 100644 --- a/vendor/github.com/docker/docker/integration/system/login_test.go +++ b/vendor/github.com/docker/docker/integration/system/login_test.go @@ -6,8 +6,9 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/integration/internal/requirement" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" 
"github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -23,5 +24,5 @@ func TestLoginFailsWithBadCredentials(t *testing.T) { } _, err := client.RegistryLogin(context.Background(), config) expected := "Error response from daemon: Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" - assert.EqualError(t, err, expected) + assert.Check(t, is.Error(err, expected)) } diff --git a/vendor/github.com/docker/docker/integration/system/version_test.go b/vendor/github.com/docker/docker/integration/system/version_test.go index 04888a604..38784e3ee 100644 --- a/vendor/github.com/docker/docker/integration/system/version_test.go +++ b/vendor/github.com/docker/docker/integration/system/version_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/docker/docker/integration/internal/request" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -13,11 +13,11 @@ func TestVersion(t *testing.T) { client := request.NewAPIClient(t) version, err := client.ServerVersion(context.Background()) - require.NoError(t, err) + assert.NilError(t, err) - assert.NotNil(t, version.APIVersion) - assert.NotNil(t, version.Version) - assert.NotNil(t, version.MinAPIVersion) - assert.Equal(t, testEnv.DaemonInfo.ExperimentalBuild, version.Experimental) - assert.Equal(t, testEnv.OSType, version.Os) + assert.Check(t, version.APIVersion != "") + assert.Check(t, version.Version != "") + assert.Check(t, version.MinAPIVersion != "") + assert.Check(t, is.Equal(testEnv.DaemonInfo.ExperimentalBuild, version.Experimental)) + assert.Check(t, is.Equal(testEnv.OSType, version.Os)) } diff --git a/vendor/github.com/docker/docker/integration/volume/volume_test.go b/vendor/github.com/docker/docker/integration/volume/volume_test.go index 38ce5782e..2fe35cf5e 100644 --- 
a/vendor/github.com/docker/docker/integration/volume/volume_test.go +++ b/vendor/github.com/docker/docker/integration/volume/volume_test.go @@ -13,8 +13,8 @@ import ( "github.com/docker/docker/integration/internal/container" "github.com/docker/docker/integration/internal/request" "github.com/docker/docker/internal/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestVolumesCreateAndList(t *testing.T) { @@ -26,7 +26,7 @@ func TestVolumesCreateAndList(t *testing.T) { vol, err := client.VolumeCreate(ctx, volumetypes.VolumesCreateBody{ Name: name, }) - require.NoError(t, err) + assert.NilError(t, err) expected := types.Volume{ // Ignore timestamp of CreatedAt @@ -34,17 +34,16 @@ func TestVolumesCreateAndList(t *testing.T) { Driver: "local", Scope: "local", Name: name, - Options: map[string]string{}, Mountpoint: fmt.Sprintf("%s/volumes/%s/_data", testEnv.DaemonInfo.DockerRootDir, name), } - assert.Equal(t, vol, expected) + assert.Check(t, is.DeepEqual(vol, expected)) volumes, err := client.VolumeList(ctx, filters.Args{}) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, len(volumes.Volumes), 1) - assert.NotNil(t, volumes.Volumes[0]) - assert.Equal(t, *volumes.Volumes[0], expected) + assert.Check(t, is.Equal(len(volumes.Volumes), 1)) + assert.Check(t, volumes.Volumes[0] != nil) + assert.Check(t, is.DeepEqual(*volumes.Volumes[0], expected)) } func TestVolumesRemove(t *testing.T) { @@ -57,7 +56,7 @@ func TestVolumesRemove(t *testing.T) { id := container.Create(t, ctx, client, container.WithVolume(prefix+"foo")) c, err := client.ContainerInspect(ctx, id) - require.NoError(t, err) + assert.NilError(t, err) vname := c.Mounts[0].Name err = client.VolumeRemove(ctx, vname, false) @@ -66,10 +65,10 @@ func TestVolumesRemove(t *testing.T) { err = client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{ 
Force: true, }) - require.NoError(t, err) + assert.NilError(t, err) err = client.VolumeRemove(ctx, vname, false) - require.NoError(t, err) + assert.NilError(t, err) } func TestVolumesInspect(t *testing.T) { @@ -84,10 +83,10 @@ func TestVolumesInspect(t *testing.T) { _, err := client.VolumeCreate(ctx, volumetypes.VolumesCreateBody{ Name: name, }) - require.NoError(t, err) + assert.NilError(t, err) vol, err := client.VolumeInspect(ctx, name) - require.NoError(t, err) + assert.NilError(t, err) expected := types.Volume{ // Ignore timestamp of CreatedAt @@ -95,16 +94,15 @@ func TestVolumesInspect(t *testing.T) { Driver: "local", Scope: "local", Name: name, - Options: map[string]string{}, Mountpoint: fmt.Sprintf("%s/volumes/%s/_data", testEnv.DaemonInfo.DockerRootDir, name), } - assert.Equal(t, vol, expected) + assert.Check(t, is.DeepEqual(vol, expected)) // comparing CreatedAt field time for the new volume to now. Removing a minute from both to avoid false positive testCreatedAt, err := time.Parse(time.RFC3339, strings.TrimSpace(vol.CreatedAt)) - require.NoError(t, err) + assert.NilError(t, err) testCreatedAt = testCreatedAt.Truncate(time.Minute) - assert.Equal(t, testCreatedAt.Equal(now), true, "Time Volume is CreatedAt not equal to current time") + assert.Check(t, is.Equal(testCreatedAt.Equal(now), true), "Time Volume is CreatedAt not equal to current time") } func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { diff --git a/vendor/github.com/docker/docker/internal/test/environment/clean.go b/vendor/github.com/docker/docker/internal/test/environment/clean.go index 065b46bee..d83175c84 100644 --- a/vendor/github.com/docker/docker/internal/test/environment/clean.go +++ b/vendor/github.com/docker/docker/internal/test/environment/clean.go @@ -7,13 +7,12 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" 
+ "github.com/gotestyourself/gotestyourself/assert" "golang.org/x/net/context" ) type testingT interface { - require.TestingT + assert.TestingT logT Fatalf(string, ...interface{}) } @@ -47,7 +46,7 @@ func unpauseAllContainers(t assert.TestingT, client client.ContainerAPIClient) { if len(containers) > 0 { for _, container := range containers { err := client.ContainerUnpause(ctx, container.ID) - assert.NoError(t, err, "failed to unpause container %s", container.ID) + assert.Check(t, err, "failed to unpause container %s", container.ID) } } } @@ -60,7 +59,7 @@ func getPausedContainers(ctx context.Context, t assert.TestingT, client client.C Quiet: true, All: true, }) - assert.NoError(t, err, "failed to list containers") + assert.Check(t, err, "failed to list containers") return containers } @@ -84,7 +83,7 @@ func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient, if err == nil || client.IsErrNotFound(err) || alreadyExists.MatchString(err.Error()) || isErrNotFoundSwarmClassic(err) { continue } - assert.NoError(t, err, "failed to remove %s", container.ID) + assert.Check(t, err, "failed to remove %s", container.ID) } } @@ -93,13 +92,13 @@ func getAllContainers(ctx context.Context, t assert.TestingT, client client.Cont Quiet: true, All: true, }) - assert.NoError(t, err, "failed to list containers") + assert.Check(t, err, "failed to list containers") return containers } func deleteAllImages(t testingT, apiclient client.ImageAPIClient, protectedImages map[string]struct{}) { images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{}) - assert.NoError(t, err, "failed to list images") + assert.Check(t, err, "failed to list images") ctx := context.Background() for _, image := range images { @@ -126,12 +125,12 @@ func removeImage(ctx context.Context, t assert.TestingT, apiclient client.ImageA if client.IsErrNotFound(err) { return } - assert.NoError(t, err, "failed to remove image %s", ref) + assert.Check(t, err, "failed to remove 
image %s", ref) } func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) { volumes, err := c.VolumeList(context.Background(), filters.Args{}) - assert.NoError(t, err, "failed to list volumes") + assert.Check(t, err, "failed to list volumes") for _, v := range volumes.Volumes { if _, ok := protectedVolumes[v.Name]; ok { @@ -142,13 +141,13 @@ func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolu if isErrNotFoundSwarmClassic(err) { continue } - assert.NoError(t, err, "failed to remove volume %s", v.Name) + assert.Check(t, err, "failed to remove volume %s", v.Name) } } func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) { networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) - assert.NoError(t, err, "failed to list networks") + assert.Check(t, err, "failed to list networks") for _, n := range networks { if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { @@ -162,7 +161,7 @@ func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatf continue } err := c.NetworkRemove(context.Background(), n.ID) - assert.NoError(t, err, "failed to remove network %s", n.ID) + assert.Check(t, err, "failed to remove network %s", n.ID) } } @@ -172,14 +171,14 @@ func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient, protectedPlug if client.IsErrNotImplemented(err) { return } - assert.NoError(t, err, "failed to list plugins") + assert.Check(t, err, "failed to list plugins") for _, p := range plugins { if _, ok := protectedPlugins[p.Name]; ok { continue } err := c.PluginRemove(context.Background(), p.Name, types.PluginRemoveOptions{Force: true}) - assert.NoError(t, err, "failed to remove plugin %s", p.ID) + assert.Check(t, err, "failed to remove plugin %s", p.ID) } } diff --git a/vendor/github.com/docker/docker/internal/test/environment/environment.go 
b/vendor/github.com/docker/docker/internal/test/environment/environment.go index 16f614633..8e6e2c72f 100644 --- a/vendor/github.com/docker/docker/internal/test/environment/environment.go +++ b/vendor/github.com/docker/docker/internal/test/environment/environment.go @@ -121,6 +121,15 @@ func (e *Execution) IsRemoteDaemon() bool { return !e.IsLocalDaemon() } +// DaemonAPIVersion returns the negociated daemon api version +func (e *Execution) DaemonAPIVersion() string { + version, err := e.APIClient().ServerVersion(context.TODO()) + if err != nil { + return "" + } + return version.APIVersion +} + // Print the execution details to stdout // TODO: print everything func (e *Execution) Print() { diff --git a/vendor/github.com/docker/docker/internal/test/environment/protect.go b/vendor/github.com/docker/docker/internal/test/environment/protect.go index ffbf985b7..3dfe606ce 100644 --- a/vendor/github.com/docker/docker/internal/test/environment/protect.go +++ b/vendor/github.com/docker/docker/internal/test/environment/protect.go @@ -6,7 +6,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" dclient "github.com/docker/docker/client" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:jessie"} @@ -57,12 +57,12 @@ func ProtectContainers(t testingT, testEnv *Execution) { testEnv.ProtectContainer(t, containers...) 
} -func getExistingContainers(t require.TestingT, testEnv *Execution) []string { +func getExistingContainers(t assert.TestingT, testEnv *Execution) []string { client := testEnv.APIClient() containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{ All: true, }) - require.NoError(t, err, "failed to list containers") + assert.NilError(t, err, "failed to list containers") containers := []string{} for _, container := range containerList { @@ -89,7 +89,7 @@ func ProtectImages(t testingT, testEnv *Execution) { testEnv.ProtectImage(t, images...) } -func getExistingImages(t require.TestingT, testEnv *Execution) []string { +func getExistingImages(t assert.TestingT, testEnv *Execution) []string { client := testEnv.APIClient() filter := filters.NewArgs() filter.Add("dangling", "false") @@ -97,7 +97,7 @@ func getExistingImages(t require.TestingT, testEnv *Execution) []string { All: true, Filters: filter, }) - require.NoError(t, err, "failed to list images") + assert.NilError(t, err, "failed to list images") images := []string{} for _, image := range imageList { @@ -136,10 +136,10 @@ func ProtectNetworks(t testingT, testEnv *Execution) { testEnv.ProtectNetwork(t, networks...) } -func getExistingNetworks(t require.TestingT, testEnv *Execution) []string { +func getExistingNetworks(t assert.TestingT, testEnv *Execution) []string { client := testEnv.APIClient() networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{}) - require.NoError(t, err, "failed to list networks") + assert.NilError(t, err, "failed to list networks") networks := []string{} for _, network := range networkList { @@ -162,14 +162,14 @@ func ProtectPlugins(t testingT, testEnv *Execution) { testEnv.ProtectPlugin(t, plugins...) 
} -func getExistingPlugins(t require.TestingT, testEnv *Execution) []string { +func getExistingPlugins(t assert.TestingT, testEnv *Execution) []string { client := testEnv.APIClient() pluginList, err := client.PluginList(context.Background(), filters.Args{}) // Docker EE does not allow cluster-wide plugin management. if dclient.IsErrNotImplemented(err) { return []string{} } - require.NoError(t, err, "failed to list plugins") + assert.NilError(t, err, "failed to list plugins") plugins := []string{} for _, plugin := range pluginList { @@ -192,10 +192,10 @@ func ProtectVolumes(t testingT, testEnv *Execution) { testEnv.ProtectVolume(t, volumes...) } -func getExistingVolumes(t require.TestingT, testEnv *Execution) []string { +func getExistingVolumes(t assert.TestingT, testEnv *Execution) []string { client := testEnv.APIClient() volumeList, err := client.VolumeList(context.Background(), filters.Args{}) - require.NoError(t, err, "failed to list volumes") + assert.NilError(t, err, "failed to list volumes") volumes := []string{} for _, volume := range volumeList.Volumes { diff --git a/vendor/github.com/docker/docker/internal/testutil/helpers.go b/vendor/github.com/docker/docker/internal/testutil/helpers.go index 77224a007..89cb552fe 100644 --- a/vendor/github.com/docker/docker/internal/testutil/helpers.go +++ b/vendor/github.com/docker/docker/internal/testutil/helpers.go @@ -3,15 +3,21 @@ package testutil // import "github.com/docker/docker/internal/testutil" import ( "io" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) +type helperT interface { + Helper() +} + // ErrorContains checks that the error is not nil, and contains the expected // substring. -func ErrorContains(t require.TestingT, err error, expectedError string, msgAndArgs ...interface{}) { - require.Error(t, err, msgAndArgs...) - assert.Contains(t, err.Error(), expectedError, msgAndArgs...) 
+// Deprecated: use assert.Assert(t, cmp.ErrorContains(err, expected)) +func ErrorContains(t assert.TestingT, err error, expectedError string, msgAndArgs ...interface{}) { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + assert.ErrorContains(t, err, expectedError, msgAndArgs...) } // DevZero acts like /dev/zero but in an OS-independent fashion. diff --git a/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go b/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go index a0f95755c..1dd09af95 100644 --- a/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go +++ b/vendor/github.com/docker/docker/internal/testutil/stringutils_test.go @@ -3,13 +3,14 @@ package testutil // import "github.com/docker/docker/internal/testutil" import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func testLengthHelper(generator func(int) string, t *testing.T) { expectedLength := 20 s := generator(expectedLength) - assert.Equal(t, expectedLength, len(s)) + assert.Check(t, is.Equal(expectedLength, len(s))) } func testUniquenessHelper(generator func(int) string, t *testing.T) { @@ -17,9 +18,9 @@ func testUniquenessHelper(generator func(int) string, t *testing.T) { set := make(map[string]struct{}, repeats) for i := 0; i < repeats; i = i + 1 { str := generator(64) - assert.Equal(t, 64, len(str)) + assert.Check(t, is.Equal(64, len(str))) _, ok := set[str] - assert.False(t, ok, "Random number is repeated") + assert.Check(t, !ok, "Random number is repeated") set[str] = struct{}{} } } diff --git a/vendor/github.com/docker/docker/libcontainerd/queue_test.go b/vendor/github.com/docker/docker/libcontainerd/queue_test.go index 92ee22a9f..df5332c12 100644 --- a/vendor/github.com/docker/docker/libcontainerd/queue_test.go +++ b/vendor/github.com/docker/docker/libcontainerd/queue_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - 
"github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestSerialization(t *testing.T) { @@ -16,15 +16,15 @@ func TestSerialization(t *testing.T) { q.append("aaa", func() { //simulate a long time task time.Sleep(10 * time.Millisecond) - require.EqualValues(t, serialization, 1) + assert.Equal(t, serialization, 1) serialization = 2 }) q.append("aaa", func() { - require.EqualValues(t, serialization, 2) + assert.Equal(t, serialization, 2) serialization = 3 }) q.append("aaa", func() { - require.EqualValues(t, serialization, 3) + assert.Equal(t, serialization, 3) serialization = 4 }) time.Sleep(20 * time.Millisecond) diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go index fb3e0bdda..35ccc0e4a 100644 --- a/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go +++ b/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go @@ -263,11 +263,15 @@ func (r *remote) startContainerd() error { func (r *remote) monitorConnection(monitor *containerd.Client) { var transientFailureCount = 0 - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - for { - <-ticker.C + select { + case <-r.shutdownContext.Done(): + r.logger.Info("stopping healthcheck following graceful shutdown") + monitor.Close() + return + case <-time.After(500 * time.Millisecond): + } + ctx, cancel := context.WithTimeout(r.shutdownContext, healthCheckTimeout) _, err := monitor.IsServing(ctx) cancel() diff --git a/vendor/github.com/docker/docker/opts/quotedstring_test.go b/vendor/github.com/docker/docker/opts/quotedstring_test.go index e24257a5d..21e6e4c85 100644 --- a/vendor/github.com/docker/docker/opts/quotedstring_test.go +++ b/vendor/github.com/docker/docker/opts/quotedstring_test.go @@ -3,27 +3,28 @@ package opts // import "github.com/docker/docker/opts" import ( "testing" - "github.com/stretchr/testify/assert" + 
"github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestQuotedStringSetWithQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NoError(t, qs.Set(`"something"`)) - assert.Equal(t, "something", qs.String()) - assert.Equal(t, "something", value) + assert.Check(t, qs.Set(`"something"`)) + assert.Check(t, is.Equal("something", qs.String())) + assert.Check(t, is.Equal("something", value)) } func TestQuotedStringSetWithMismatchedQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NoError(t, qs.Set(`"something'`)) - assert.Equal(t, `"something'`, qs.String()) + assert.Check(t, qs.Set(`"something'`)) + assert.Check(t, is.Equal(`"something'`, qs.String())) } func TestQuotedStringSetWithNoQuotes(t *testing.T) { value := "" qs := NewQuotedString(&value) - assert.NoError(t, qs.Set("something")) - assert.Equal(t, "something", qs.String()) + assert.Check(t, qs.Set("something")) + assert.Check(t, is.Equal("something", qs.String())) } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go index e48ef1aa1..b397c8abf 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/docker/docker/pkg/system" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" "golang.org/x/sys/unix" ) @@ -24,35 +24,35 @@ import ( func setupOverlayTestDir(t *testing.T, src string) { // Create opaque directory containing single file and permission 0700 err := os.Mkdir(filepath.Join(src, "d1"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = system.Lsetxattr(filepath.Join(src, "d1"), "trusted.overlay.opaque", []byte("y"), 0) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(src, "d1", 
"f1"), []byte{}, 0600) - require.NoError(t, err) + assert.NilError(t, err) // Create another opaque directory containing single file but with permission 0750 err = os.Mkdir(filepath.Join(src, "d2"), 0750) - require.NoError(t, err) + assert.NilError(t, err) err = system.Lsetxattr(filepath.Join(src, "d2"), "trusted.overlay.opaque", []byte("y"), 0) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(src, "d2", "f1"), []byte{}, 0660) - require.NoError(t, err) + assert.NilError(t, err) // Create regular directory with deleted file err = os.Mkdir(filepath.Join(src, "d3"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = system.Mknod(filepath.Join(src, "d3", "f1"), unix.S_IFCHR, 0) - require.NoError(t, err) + assert.NilError(t, err) } func checkOpaqueness(t *testing.T, path string, opaque string) { xattrOpaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - require.NoError(t, err) + assert.NilError(t, err) if string(xattrOpaque) != opaque { t.Fatalf("Unexpected opaque value: %q, expected %q", string(xattrOpaque), opaque) @@ -62,7 +62,7 @@ func checkOpaqueness(t *testing.T, path string, opaque string) { func checkOverlayWhiteout(t *testing.T, path string) { stat, err := os.Stat(path) - require.NoError(t, err) + assert.NilError(t, err) statT, ok := stat.Sys().(*syscall.Stat_t) if !ok { @@ -75,7 +75,7 @@ func checkOverlayWhiteout(t *testing.T, path string) { func checkFileMode(t *testing.T, path string, perm os.FileMode) { stat, err := os.Stat(path) - require.NoError(t, err) + assert.NilError(t, err) if stat.Mode() != perm { t.Fatalf("Unexpected file mode for %s: %o, expected %o", path, stat.Mode(), perm) @@ -84,17 +84,17 @@ func checkFileMode(t *testing.T, path string, perm os.FileMode) { func TestOverlayTarUntar(t *testing.T) { oldmask, err := system.Umask(0) - require.NoError(t, err) + assert.NilError(t, err) defer system.Umask(oldmask) src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") - 
require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(src) setupOverlayTestDir(t, src) dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dst) options := &TarOptions{ @@ -102,11 +102,11 @@ func TestOverlayTarUntar(t *testing.T) { WhiteoutFormat: OverlayWhiteoutFormat, } archive, err := TarWithOptions(src, options) - require.NoError(t, err) + assert.NilError(t, err) defer archive.Close() err = Untar(archive, dst, options) - require.NoError(t, err) + assert.NilError(t, err) checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d2"), 0750|os.ModeDir) @@ -123,31 +123,31 @@ func TestOverlayTarUntar(t *testing.T) { func TestOverlayTarAUFSUntar(t *testing.T) { oldmask, err := system.Umask(0) - require.NoError(t, err) + assert.NilError(t, err) defer system.Umask(oldmask) src, err := ioutil.TempDir("", "docker-test-overlay-tar-src") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(src) setupOverlayTestDir(t, src) dst, err := ioutil.TempDir("", "docker-test-overlay-tar-dst") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dst) archive, err := TarWithOptions(src, &TarOptions{ Compression: Uncompressed, WhiteoutFormat: OverlayWhiteoutFormat, }) - require.NoError(t, err) + assert.NilError(t, err) defer archive.Close() err = Untar(archive, dst, &TarOptions{ Compression: Uncompressed, WhiteoutFormat: AUFSWhiteoutFormat, }) - require.NoError(t, err) + assert.NilError(t, err) checkFileMode(t, filepath.Join(dst, "d1"), 0700|os.ModeDir) checkFileMode(t, filepath.Join(dst, "d1", WhiteoutOpaqueDir), 0700) diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go index 70db8d4a1..e8d12dd72 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_test.go @@ 
-10,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "reflect" "runtime" "strings" "testing" @@ -17,8 +18,8 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) var tmp string @@ -263,7 +264,7 @@ func TestCmdStreamGood(t *testing.T) { func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file @@ -282,7 +283,7 @@ func TestUntarPathWithInvalidDest(t *testing.T) { cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() - require.NoError(t, err) + assert.NilError(t, err) err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { @@ -304,7 +305,7 @@ func TestUntarPathWithInvalidSrc(t *testing.T) { func TestUntarPath(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") @@ -325,7 +326,7 @@ func TestUntarPath(t *testing.T) { } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() - require.NoError(t, err) + assert.NilError(t, err) err = defaultUntarPath(tarFile, destFolder) if err != nil { @@ -726,12 +727,12 @@ func TestTarUntar(t *testing.T) { func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-chown-opt") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(origin) filePath := filepath.Join(origin, "1") err = ioutil.WriteFile(filePath, []byte("hello world"), 0700) - 
require.NoError(t, err) + assert.NilError(t, err) idMaps := []idtools.IDMap{ 0: { @@ -759,7 +760,7 @@ func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { } for _, testCase := range cases { reader, err := TarWithOptions(filePath, testCase.opts) - require.NoError(t, err) + assert.NilError(t, err) tr := tar.NewReader(reader) defer reader.Close() for { @@ -768,9 +769,9 @@ func TestTarWithOptionsChownOptsAlwaysOverridesIdPair(t *testing.T) { // end of tar archive break } - require.NoError(t, err) - assert.Equal(t, hdr.Uid, testCase.expectedUID, "Uid equals expected value") - assert.Equal(t, hdr.Gid, testCase.expectedGID, "Gid equals expected value") + assert.NilError(t, err) + assert.Check(t, is.Equal(hdr.Uid, testCase.expectedUID), "Uid equals expected value") + assert.Check(t, is.Equal(hdr.Gid, testCase.expectedGID), "Gid equals expected value") } } } @@ -1182,10 +1183,10 @@ func TestUntarInvalidSymlink(t *testing.T) { func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") - require.NoError(t, err) + assert.NilError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) - require.NoError(t, err) + assert.NilError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. 
Read %d instead", n) } @@ -1244,7 +1245,7 @@ func TestReplaceFileTarWrapper(t *testing.T) { map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) - assert.Equal(t, testcase.expected, actual, testcase.doc) + assert.Check(t, is.Equal(testcase.expected, actual), testcase.doc) } } @@ -1255,27 +1256,27 @@ func TestPrefixHeaderReadable(t *testing.T) { var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") tmpDir, err := ioutil.TempDir("", "prefix-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmpDir) err = Untar(bytes.NewReader(testFile), tmpDir, nil) - require.NoError(t, err) + assert.NilError(t, err) baseName := "foo" pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName _, err = os.Lstat(filepath.Join(tmpDir, pth)) - require.NoError(t, err) + assert.NilError(t, err) } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - require.NoError(t, err) + assert.NilError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) - require.NoError(t, err) + assert.NilError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) - require.NoError(t, err) + assert.NilError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() @@ -1291,7 +1292,7 @@ func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { - assert.Nil(t, content) + 
assert.Check(t, is.Nil(content)) return createOrReplaceModifier(path, header, content) } } @@ -1309,17 +1310,17 @@ func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.He func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { destDir, err := ioutil.TempDir("", "docker-test-destDir") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) - require.NoError(t, err) + assert.NilError(t, err) files, _ := ioutil.ReadDir(destDir) - assert.Len(t, files, expectedCount, doc) + assert.Check(t, is.Len(files, expectedCount), doc) content, err := ioutil.ReadFile(filepath.Join(destDir, name)) - assert.NoError(t, err) + assert.Check(t, err) return string(content) } @@ -1338,7 +1339,7 @@ func TestDisablePigz(t *testing.T) { // For the context canceller contextReaderCloserWrapper := outsideReaderCloserWrapper.Reader.(*ioutils.ReadCloserWrapper) - assert.IsType(t, &gzip.Reader{}, contextReaderCloserWrapper.Reader) + assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } func TestPigz(t *testing.T) { @@ -1351,9 +1352,9 @@ func TestPigz(t *testing.T) { _, err := exec.LookPath("unpigz") if err == nil { t.Log("Tested whether Pigz is used, as it installed") - assert.IsType(t, &io.PipeReader{}, contextReaderCloserWrapper.Reader) + assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&io.PipeReader{})) } else { t.Log("Tested whether Pigz is not used, as it not installed") - assert.IsType(t, &gzip.Reader{}, contextReaderCloserWrapper.Reader) + assert.Equal(t, reflect.TypeOf(contextReaderCloserWrapper.Reader), reflect.TypeOf(&gzip.Reader{})) } } diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go index 17de96e94..5d13c3542 100644 --- 
a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/docker/docker/pkg/system" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/sys/unix" ) @@ -72,18 +72,18 @@ func TestChmodTarEntry(t *testing.T) { func TestTarWithHardLink(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(origin) err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) - require.NoError(t, err) + assert.NilError(t, err) var i1, i2 uint64 i1, err = getNlink(filepath.Join(origin, "1")) - require.NoError(t, err) + assert.NilError(t, err) // sanity check that we can hardlink if i1 != 2 { @@ -91,48 +91,48 @@ func TestTarWithHardLink(t *testing.T) { } dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) - require.NoError(t, err) + assert.NilError(t, err) // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) - require.NoError(t, err) + assert.NilError(t, err) bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - require.NoError(t, err) + assert.NilError(t, err) i1, err = getInode(filepath.Join(dest, "1")) - require.NoError(t, err) + assert.NilError(t, err) i2, err = getInode(filepath.Join(dest, "2")) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, i1, i2) + assert.Check(t, is.Equal(i1, i2)) } 
func TestTarWithHardLinkAndRebase(t *testing.T) { tmpDir, err := ioutil.TempDir("", "docker-test-tar-hardlink-rebase") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmpDir) origin := filepath.Join(tmpDir, "origin") err = os.Mkdir(origin, 0700) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")) - require.NoError(t, err) + assert.NilError(t, err) var i1, i2 uint64 i1, err = getNlink(filepath.Join(origin, "1")) - require.NoError(t, err) + assert.NilError(t, err) // sanity check that we can hardlink if i1 != 2 { @@ -141,20 +141,20 @@ func TestTarWithHardLinkAndRebase(t *testing.T) { dest := filepath.Join(tmpDir, "dest") bRdr, err := TarResourceRebase(origin, "origin") - require.NoError(t, err) + assert.NilError(t, err) dstDir, srcBase := SplitPathDirEntry(origin) _, dstBase := SplitPathDirEntry(dest) content := RebaseArchiveEntries(bRdr, srcBase, dstBase) err = Untar(content, dstDir, &TarOptions{Compression: Uncompressed, NoLchown: true, NoOverwriteDirNonDir: true}) - require.NoError(t, err) + assert.NilError(t, err) i1, err = getInode(filepath.Join(dest, "1")) - require.NoError(t, err) + assert.NilError(t, err) i2, err = getInode(filepath.Join(dest, "2")) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, i1, i2) + assert.Check(t, is.Equal(i1, i2)) } func getNlink(path string) (uint64, error) { @@ -184,37 +184,37 @@ func getInode(path string) (uint64, error) { func TestTarWithBlockCharFifo(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(origin) err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = system.Mknod(filepath.Join(origin, "2"), 
unix.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))) - require.NoError(t, err) + assert.NilError(t, err) err = system.Mknod(filepath.Join(origin, "3"), unix.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))) - require.NoError(t, err) + assert.NilError(t, err) err = system.Mknod(filepath.Join(origin, "4"), unix.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))) - require.NoError(t, err) + assert.NilError(t, err) dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dest) // we'll do this in two steps to separate failure fh, err := Tar(origin, Uncompressed) - require.NoError(t, err) + assert.NilError(t, err) // ensure we can read the whole thing with no error, before writing back out buf, err := ioutil.ReadAll(fh) - require.NoError(t, err) + assert.NilError(t, err) bRdr := bytes.NewReader(buf) err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - require.NoError(t, err) + assert.NilError(t, err) changes, err := ChangesDirs(origin, dest) - require.NoError(t, err) + assert.NilError(t, err) if len(changes) > 0 { t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) @@ -224,17 +224,17 @@ func TestTarWithBlockCharFifo(t *testing.T) { // TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows func TestTarUntarWithXattr(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(origin) err = ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700) - require.NoError(t, err) + assert.NilError(t, err) err = system.Lsetxattr(filepath.Join(origin, 
"2"), "security.capability", []byte{0x00}, 0) - require.NoError(t, err) + assert.NilError(t, err) for _, c := range []Compression{ Uncompressed, @@ -309,7 +309,7 @@ func TestCopyInfoDestinationPathSymlink(t *testing.T) { for _, info := range testData { p := filepath.Join(tmpDir, info.resource.path, info.file) ci, err := CopyInfoDestinationPath(p) - assert.NoError(t, err) - assert.Equal(t, info.expected, ci) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(info.expected, ci)) } } diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go index f316cd320..2d316e77b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/docker/docker/pkg/system" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func max(x, y int) int { @@ -76,19 +76,19 @@ func provisionSampleDir(t *testing.T, root string, files []FileData) { p := path.Join(root, info.path) if info.filetype == Dir { err := os.MkdirAll(p, info.permissions) - require.NoError(t, err) + assert.NilError(t, err) } else if info.filetype == Regular { err := ioutil.WriteFile(p, []byte(info.contents), info.permissions) - require.NoError(t, err) + assert.NilError(t, err) } else if info.filetype == Symlink { err := os.Symlink(info.contents, p) - require.NoError(t, err) + assert.NilError(t, err) } if info.filetype != Symlink { // Set a consistent ctime, atime for all files and dirs err := system.Chtimes(p, now, now) - require.NoError(t, err) + assert.NilError(t, err) } } } @@ -118,14 +118,14 @@ func TestChangesWithNoChanges(t *testing.T) { t.Skip("symlinks on Windows") } rwLayer, err := ioutil.TempDir("", "docker-changes-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(rwLayer) layer, err := ioutil.TempDir("", "docker-changes-test-layer") - 
require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(layer) createSampleDir(t, layer) changes, err := Changes([]string{layer}, rwLayer) - require.NoError(t, err) + assert.NilError(t, err) if len(changes) != 0 { t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) } @@ -139,14 +139,14 @@ func TestChangesWithChanges(t *testing.T) { } // Mock the readonly layer layer, err := ioutil.TempDir("", "docker-changes-test-layer") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(layer) createSampleDir(t, layer) os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) // Mock the RW layer rwLayer, err := ioutil.TempDir("", "docker-changes-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(rwLayer) // Create a folder in RW layer @@ -163,7 +163,7 @@ func TestChangesWithChanges(t *testing.T) { ioutil.WriteFile(newFile, []byte{}, 0740) changes, err := Changes([]string{layer}, rwLayer) - require.NoError(t, err) + assert.NilError(t, err) expectedChanges := []Change{ {"/dir1", ChangeModify}, @@ -183,7 +183,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { t.Skip("symlinks on Windows") } baseLayer, err := ioutil.TempDir("", "docker-changes-test.") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(baseLayer) dir3 := path.Join(baseLayer, "dir1/dir2/dir3") @@ -193,7 +193,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("hello"), 0666) layer, err := ioutil.TempDir("", "docker-changes-test2.") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(layer) // Test creating a new file @@ -206,7 +206,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("bye"), 0666) changes, err := Changes([]string{baseLayer}, layer) - require.NoError(t, err) + assert.NilError(t, err) expectedChanges := []Change{ {"/dir1/dir2/dir3", ChangeModify}, @@ -216,7 +216,7 @@ func 
TestChangesWithChangesGH13590(t *testing.T) { // Now test changing a file layer, err = ioutil.TempDir("", "docker-changes-test3.") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(layer) if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { @@ -227,7 +227,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("bye"), 0666) changes, err = Changes([]string{baseLayer}, layer) - require.NoError(t, err) + assert.NilError(t, err) expectedChanges = []Change{ {"/dir1/dir2/dir3/file.txt", ChangeModify}, @@ -243,15 +243,15 @@ func TestChangesDirsEmpty(t *testing.T) { t.Skip("symlinks on Windows") } src, err := ioutil.TempDir("", "docker-changes-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" err = copyDir(src, dst) - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) - require.NoError(t, err) + assert.NilError(t, err) if len(changes) != 0 { t.Fatalf("Reported changes for identical dirs: %v", changes) @@ -263,64 +263,64 @@ func TestChangesDirsEmpty(t *testing.T) { func mutateSampleDir(t *testing.T, root string) { // Remove a regular file err := os.RemoveAll(path.Join(root, "file1")) - require.NoError(t, err) + assert.NilError(t, err) // Remove a directory err = os.RemoveAll(path.Join(root, "dir1")) - require.NoError(t, err) + assert.NilError(t, err) // Remove a symlink err = os.RemoveAll(path.Join(root, "symlink1")) - require.NoError(t, err) + assert.NilError(t, err) // Rewrite a file err = ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777) - require.NoError(t, err) + assert.NilError(t, err) // Replace a file err = os.RemoveAll(path.Join(root, "file3")) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404) - require.NoError(t, err) + assert.NilError(t, err) // Touch file err = 
system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)) - require.NoError(t, err) + assert.NilError(t, err) // Replace file with dir err = os.RemoveAll(path.Join(root, "file5")) - require.NoError(t, err) + assert.NilError(t, err) err = os.MkdirAll(path.Join(root, "file5"), 0666) - require.NoError(t, err) + assert.NilError(t, err) // Create new file err = ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777) - require.NoError(t, err) + assert.NilError(t, err) // Create new dir err = os.MkdirAll(path.Join(root, "dirnew"), 0766) - require.NoError(t, err) + assert.NilError(t, err) // Create a new symlink err = os.Symlink("targetnew", path.Join(root, "symlinknew")) - require.NoError(t, err) + assert.NilError(t, err) // Change a symlink err = os.RemoveAll(path.Join(root, "symlink2")) - require.NoError(t, err) + assert.NilError(t, err) err = os.Symlink("target2change", path.Join(root, "symlink2")) - require.NoError(t, err) + assert.NilError(t, err) // Replace dir with file err = os.RemoveAll(path.Join(root, "dir2")) - require.NoError(t, err) + assert.NilError(t, err) err = ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777) - require.NoError(t, err) + assert.NilError(t, err) // Touch dir err = system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)) - require.NoError(t, err) + assert.NilError(t, err) } func TestChangesDirsMutated(t *testing.T) { @@ -330,18 +330,18 @@ func TestChangesDirsMutated(t *testing.T) { t.Skip("symlinks on Windows") } src, err := ioutil.TempDir("", "docker-changes-test") - require.NoError(t, err) + assert.NilError(t, err) createSampleDir(t, src) dst := src + "-copy" err = copyDir(src, dst) - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(src) defer os.RemoveAll(dst) mutateSampleDir(t, dst) changes, err := ChangesDirs(dst, src) - require.NoError(t, err) + assert.NilError(t, err) sort.Sort(changesByPath(changes)) 
@@ -386,29 +386,29 @@ func TestApplyLayer(t *testing.T) { t.Skip("symlinks on Windows") } src, err := ioutil.TempDir("", "docker-changes-test") - require.NoError(t, err) + assert.NilError(t, err) createSampleDir(t, src) defer os.RemoveAll(src) dst := src + "-copy" err = copyDir(src, dst) - require.NoError(t, err) + assert.NilError(t, err) mutateSampleDir(t, dst) defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) - require.NoError(t, err) + assert.NilError(t, err) layer, err := ExportChanges(dst, changes, nil, nil) - require.NoError(t, err) + assert.NilError(t, err) layerCopy, err := NewTempArchive(layer, "") - require.NoError(t, err) + assert.NilError(t, err) _, err = ApplyLayer(src, layerCopy) - require.NoError(t, err) + assert.NilError(t, err) changes2, err := ChangesDirs(src, dst) - require.NoError(t, err) + assert.NilError(t, err) if len(changes2) != 0 { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) @@ -422,18 +422,18 @@ func TestChangesSizeWithHardlinks(t *testing.T) { t.Skip("hardlinks on Windows") } srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(srcDir) destDir, err := ioutil.TempDir("", "docker-test-destDir") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(destDir) creationSize, err := prepareUntarSourceDirectory(100, destDir, true) - require.NoError(t, err) + assert.NilError(t, err) changes, err := ChangesDirs(destDir, srcDir) - require.NoError(t, err) + assert.NilError(t, err) got := ChangesSize(destDir, changes) if got != int64(creationSize) { @@ -460,14 +460,14 @@ func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { func TestChangesSize(t *testing.T) { parentPath, err := ioutil.TempDir("", "docker-changes-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(parentPath) addition := path.Join(parentPath, "addition") err = ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744) - 
require.NoError(t, err) + assert.NilError(t, err) modification := path.Join(parentPath, "modification") err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744) - require.NoError(t, err) + assert.NilError(t, err) changes := []Change{ {Path: "addition", Kind: ChangeAdd}, diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go index 3126d8b51..08b1702cf 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go @@ -16,7 +16,7 @@ import ( "strings" "testing" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func removeAllPaths(paths ...string) { @@ -29,10 +29,10 @@ func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { var err error tmpDirA, err = ioutil.TempDir("", "archive-copy-test") - require.NoError(t, err) + assert.NilError(t, err) tmpDirB, err = ioutil.TempDir("", "archive-copy-test") - require.NoError(t, err) + assert.NilError(t, err) return } @@ -119,7 +119,7 @@ func logDirContents(t *testing.T, dirPath string) { t.Logf("logging directory contents: %q", dirPath) err := filepath.Walk(dirPath, logWalkedPaths) - require.NoError(t, err) + assert.NilError(t, err) } func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { @@ -293,7 +293,7 @@ func TestCopyCaseA(t *testing.T) { } err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + assert.NilError(t, err) os.Remove(dstPath) symlinkPath := filepath.Join(tmpDirA, "symlink3") @@ -305,14 +305,14 @@ func TestCopyCaseA(t *testing.T) { } err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + assert.NilError(t, err) os.Remove(dstPath) if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { t.Fatalf("unexpected error %T: %s", err, err) } err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + assert.NilError(t, 
err) } // B. SRC specifies a file and DST (with trailing path separator) doesn't @@ -374,7 +374,7 @@ func TestCopyCaseC(t *testing.T) { } err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + assert.NilError(t, err) } // C. Symbol link following version: @@ -411,7 +411,7 @@ func TestCopyCaseCFSym(t *testing.T) { } err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + assert.NilError(t, err) } // D. SRC specifies a file and DST exists as a directory. This should place @@ -441,7 +441,7 @@ func TestCopyCaseD(t *testing.T) { } err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -460,7 +460,7 @@ func TestCopyCaseD(t *testing.T) { } err = fileContentsEqual(t, srcPath, dstPath) - require.NoError(t, err) + assert.NilError(t, err) } // D. Symbol link following version: @@ -492,7 +492,7 @@ func TestCopyCaseDFSym(t *testing.T) { } err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -511,7 +511,7 @@ func TestCopyCaseDFSym(t *testing.T) { } err = fileContentsEqual(t, linkTarget, dstPath) - require.NoError(t, err) + assert.NilError(t, err) } // E. SRC specifies a directory and DST does not exist. This should create a @@ -554,7 +554,7 @@ func TestCopyCaseE(t *testing.T) { } err = dirContentsEqual(t, dstDir, srcDir) - require.NoError(t, err) + assert.NilError(t, err) } // E. Symbol link following version: @@ -599,7 +599,7 @@ func TestCopyCaseEFSym(t *testing.T) { } err = dirContentsEqual(t, dstDir, linkTarget) - require.NoError(t, err) + assert.NilError(t, err) } // F. SRC specifies a directory and DST exists as a file. 
This should cause an @@ -658,7 +658,7 @@ func TestCopyCaseG(t *testing.T) { } err = dirContentsEqual(t, resultDir, srcDir) - require.NoError(t, err) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -677,7 +677,7 @@ func TestCopyCaseG(t *testing.T) { } err = dirContentsEqual(t, resultDir, srcDir) - require.NoError(t, err) + assert.NilError(t, err) } // G. Symbol link version: @@ -704,7 +704,7 @@ func TestCopyCaseGFSym(t *testing.T) { } err = dirContentsEqual(t, resultDir, linkTarget) - require.NoError(t, err) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -723,7 +723,7 @@ func TestCopyCaseGFSym(t *testing.T) { } err = dirContentsEqual(t, resultDir, linkTarget) - require.NoError(t, err) + assert.NilError(t, err) } // H. SRC specifies a directory's contents only and DST does not exist. This @@ -884,7 +884,7 @@ func TestCopyCaseJ(t *testing.T) { } err = dirContentsEqual(t, dstDir, srcDir) - require.NoError(t, err) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. @@ -903,7 +903,7 @@ func TestCopyCaseJ(t *testing.T) { } err = dirContentsEqual(t, dstDir, srcDir) - require.NoError(t, err) + assert.NilError(t, err) } // J. Symbol link following version: @@ -935,7 +935,7 @@ func TestCopyCaseJFSym(t *testing.T) { } err = dirContentsEqual(t, dstDir, linkTarget) - require.NoError(t, err) + assert.NilError(t, err) // Now try again but using a trailing path separator for dstDir. 
@@ -954,5 +954,5 @@ func TestCopyCaseJFSym(t *testing.T) { } err = dirContentsEqual(t, dstDir, linkTarget) - require.NoError(t, err) + assert.NilError(t, err) } diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go index 6decf8fcc..979536777 100644 --- a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go +++ b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go @@ -6,12 +6,12 @@ import ( "io" "testing" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestGenerateEmptyFile(t *testing.T) { archive, err := Generate("emptyFile") - require.NoError(t, err) + assert.NilError(t, err) if archive == nil { t.Fatal("The generated archive should not be nil.") } @@ -28,7 +28,7 @@ func TestGenerateEmptyFile(t *testing.T) { if err == io.EOF { break } - require.NoError(t, err) + assert.NilError(t, err) buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() @@ -52,7 +52,7 @@ func TestGenerateEmptyFile(t *testing.T) { func TestGenerateWithContent(t *testing.T) { archive, err := Generate("file", "content") - require.NoError(t, err) + assert.NilError(t, err) if archive == nil { t.Fatal("The generated archive should not be nil.") } @@ -69,7 +69,7 @@ func TestGenerateWithContent(t *testing.T) { if err == io.EOF { break } - require.NoError(t, err) + assert.NilError(t, err) buf := new(bytes.Buffer) buf.ReadFrom(tr) content := buf.String() diff --git a/vendor/github.com/docker/docker/pkg/authorization/api_test.go b/vendor/github.com/docker/docker/pkg/authorization/api_test.go index 90a984276..84964d2c5 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/api_test.go +++ b/vendor/github.com/docker/docker/pkg/authorization/api_test.go @@ -11,7 +11,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func 
TestPeerCertificateMarshalJSON(t *testing.T) { @@ -32,21 +33,21 @@ func TestPeerCertificateMarshalJSON(t *testing.T) { } // generate private key privatekey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) + assert.NilError(t, err) publickey := &privatekey.PublicKey // create a self-signed certificate. template = parent var parent = template raw, err := x509.CreateCertificate(rand.Reader, template, parent, publickey, privatekey) - require.NoError(t, err) + assert.NilError(t, err) cert, err := x509.ParseCertificate(raw) - require.NoError(t, err) + assert.NilError(t, err) var certs = []*x509.Certificate{cert} addr := "www.authz.com/auth" req, err := http.NewRequest("GET", addr, nil) - require.NoError(t, err) + assert.NilError(t, err) req.RequestURI = addr req.TLS = &tls.ConnectionState{} @@ -58,15 +59,15 @@ func TestPeerCertificateMarshalJSON(t *testing.T) { t.Run("Marshalling :", func(t *testing.T) { raw, err = pcObj.MarshalJSON() - require.NotNil(t, raw) - require.Nil(t, err) + assert.Assert(t, raw != nil) + assert.NilError(t, err) }) t.Run("UnMarshalling :", func(t *testing.T) { err := pcObj.UnmarshalJSON(raw) - require.Nil(t, err) - require.Equal(t, "Earth", pcObj.Subject.Country[0]) - require.Equal(t, true, pcObj.IsCA) + assert.Assert(t, is.Nil(err)) + assert.Equal(t, "Earth", pcObj.Subject.Country[0]) + assert.Equal(t, true, pcObj.IsCA) }) diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go b/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go index 3812d804e..e32e4bf42 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/docker/docker/pkg/plugingetter" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestMiddleware(t *testing.T) { @@ -15,9 +15,9 @@ func TestMiddleware(t *testing.T) { var 
pluginGetter plugingetter.PluginGetter m := NewMiddleware(pluginNames, pluginGetter) authPlugins := m.getAuthzPlugins() - require.Equal(t, 2, len(authPlugins)) - require.EqualValues(t, pluginNames[0], authPlugins[0].Name()) - require.EqualValues(t, pluginNames[1], authPlugins[1].Name()) + assert.Equal(t, 2, len(authPlugins)) + assert.Equal(t, pluginNames[0], authPlugins[0].Name()) + assert.Equal(t, pluginNames[1], authPlugins[1].Name()) } func TestNewResponseModifier(t *testing.T) { @@ -25,17 +25,17 @@ func TestNewResponseModifier(t *testing.T) { modifier := NewResponseModifier(recorder) modifier.Header().Set("H1", "V1") modifier.Write([]byte("body")) - require.False(t, modifier.Hijacked()) + assert.Assert(t, !modifier.Hijacked()) modifier.WriteHeader(http.StatusInternalServerError) - require.NotNil(t, modifier.RawBody()) + assert.Assert(t, modifier.RawBody() != nil) raw, err := modifier.RawHeaders() - require.NotNil(t, raw) - require.Nil(t, err) + assert.Assert(t, raw != nil) + assert.NilError(t, err) headerData := strings.Split(strings.TrimSpace(string(raw)), ":") - require.EqualValues(t, "H1", strings.TrimSpace(headerData[0])) - require.EqualValues(t, "V1", strings.TrimSpace(headerData[1])) + assert.Equal(t, "H1", strings.TrimSpace(headerData[0])) + assert.Equal(t, "V1", strings.TrimSpace(headerData[1])) modifier.Flush() modifier.FlushAll() diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go b/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go index 257832b33..ddfe9d958 100644 --- a/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/authorization/middleware_unix_test.go @@ -8,7 +8,8 @@ import ( "testing" "github.com/docker/docker/pkg/plugingetter" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -30,7 +31,7 @@ 
func TestMiddlewareWrapHandler(t *testing.T) { middleWare.SetPlugins([]string{"My Test Plugin"}) setAuthzPlugins(middleWare, authList) mdHandler := middleWare.WrapHandler(handler) - require.NotNil(t, mdHandler) + assert.Assert(t, mdHandler != nil) addr := "www.example.com/auth" req, _ := http.NewRequest("GET", addr, nil) @@ -46,7 +47,7 @@ func TestMiddlewareWrapHandler(t *testing.T) { Msg: "Server Auth Not Allowed", } if err := mdHandler(ctx, resp, req, map[string]string{}); err == nil { - require.Error(t, err) + assert.Assert(t, is.ErrorContains(err, "")) } }) @@ -57,7 +58,7 @@ func TestMiddlewareWrapHandler(t *testing.T) { Msg: "Server Auth Allowed", } if err := mdHandler(ctx, resp, req, map[string]string{}); err != nil { - require.NoError(t, err) + assert.NilError(t, err) } }) diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go index 0a648d146..b167538d5 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -10,8 +10,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) // CopyFile with invalid src @@ -384,9 +384,9 @@ func TestMatches(t *testing.T) { for _, test := range tests { desc := fmt.Sprintf("pattern=%q text=%q", test.pattern, test.text) pm, err := NewPatternMatcher([]string{test.pattern}) - require.NoError(t, err, desc) + assert.NilError(t, err, desc) res, _ := pm.Matches(test.text) - assert.Equal(t, test.pass, res, desc) + assert.Check(t, is.Equal(test.pass, res), desc) } } diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go index 931e332bb..e493b9e8d 100644 --- 
a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go @@ -10,9 +10,9 @@ import ( "path/filepath" "testing" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/gotestyourself/gotestyourself/skip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "golang.org/x/sys/unix" ) @@ -89,7 +89,7 @@ func TestMkdirAllAndChown(t *testing.T) { func TestMkdirAllAndChownNew(t *testing.T) { RequiresRoot(t) dirName, err := ioutil.TempDir("", "mkdirnew") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(dirName) testTree := map[string]node{ @@ -99,32 +99,32 @@ func TestMkdirAllAndChownNew(t *testing.T) { "lib/x86_64": {45, 45}, "lib/x86_64/share": {1, 1}, } - require.NoError(t, buildTree(dirName, testTree)) + assert.NilError(t, buildTree(dirName, testTree)) // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid err = MkdirAllAndChownNew(filepath.Join(dirName, "usr", "share"), 0755, IDPair{UID: 99, GID: 99}) - require.NoError(t, err) + assert.NilError(t, err) testTree["usr/share"] = node{99, 99} verifyTree, err := readTree(dirName, "") - require.NoError(t, err) - require.NoError(t, compareTrees(testTree, verifyTree)) + assert.NilError(t, err) + assert.NilError(t, compareTrees(testTree, verifyTree)) // test 2-deep new directories--both should be owned by the uid/gid pair err = MkdirAllAndChownNew(filepath.Join(dirName, "lib", "some", "other"), 0755, IDPair{UID: 101, GID: 101}) - require.NoError(t, err) + assert.NilError(t, err) testTree["lib/some"] = node{101, 101} testTree["lib/some/other"] = node{101, 101} verifyTree, err = readTree(dirName, "") - require.NoError(t, err) - require.NoError(t, compareTrees(testTree, verifyTree)) + assert.NilError(t, err) + assert.NilError(t, compareTrees(testTree, verifyTree)) // test a directory that already exists; 
should NOT be chowned err = MkdirAllAndChownNew(filepath.Join(dirName, "usr"), 0755, IDPair{UID: 102, GID: 102}) - require.NoError(t, err) + assert.NilError(t, err) verifyTree, err = readTree(dirName, "") - require.NoError(t, err) - require.NoError(t, compareTrees(testTree, verifyTree)) + assert.NilError(t, err) + assert.NilError(t, compareTrees(testTree, verifyTree)) } func TestMkdirAndChown(t *testing.T) { @@ -235,7 +235,7 @@ func compareTrees(left, right map[string]node) error { func delUser(t *testing.T, name string) { _, err := execCmd("userdel", name) - assert.NoError(t, err) + assert.Check(t, err) } func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { @@ -283,9 +283,9 @@ func TestGetRootUIDGID(t *testing.T) { } uid, gid, err := GetRootUIDGID(uidMap, gidMap) - assert.NoError(t, err) - assert.Equal(t, os.Getegid(), uid) - assert.Equal(t, os.Getegid(), gid) + assert.Check(t, err) + assert.Check(t, is.Equal(os.Getegid(), uid)) + assert.Check(t, is.Equal(os.Getegid(), gid)) uidMapError := []IDMap{ { @@ -295,7 +295,7 @@ func TestGetRootUIDGID(t *testing.T) { }, } _, _, err = GetRootUIDGID(uidMapError, gidMap) - assert.EqualError(t, err, "Container ID 0 cannot be mapped to a host ID") + assert.Check(t, is.Error(err, "Container ID 0 cannot be mapped to a host ID")) } func TestToContainer(t *testing.T) { @@ -308,74 +308,74 @@ func TestToContainer(t *testing.T) { } containerID, err := toContainer(2, uidMap) - assert.NoError(t, err) - assert.Equal(t, uidMap[0].ContainerID, containerID) + assert.Check(t, err) + assert.Check(t, is.Equal(uidMap[0].ContainerID, containerID)) } func TestNewIDMappings(t *testing.T) { RequiresRoot(t) _, _, err := AddNamespaceRangesUser(tempUser) - assert.NoError(t, err) + assert.Check(t, err) defer delUser(t, tempUser) tempUser, err := user.Lookup(tempUser) - assert.NoError(t, err) + assert.Check(t, err) gids, err := tempUser.GroupIds() - assert.NoError(t, err) + assert.Check(t, err) group, err := user.LookupGroupId(string(gids[0])) 
- assert.NoError(t, err) + assert.Check(t, err) idMappings, err := NewIDMappings(tempUser.Username, group.Name) - assert.NoError(t, err) + assert.Check(t, err) rootUID, rootGID, err := GetRootUIDGID(idMappings.UIDs(), idMappings.GIDs()) - assert.NoError(t, err) + assert.Check(t, err) dirName, err := ioutil.TempDir("", "mkdirall") - assert.NoError(t, err, "Couldn't create temp directory") + assert.Check(t, err, "Couldn't create temp directory") defer os.RemoveAll(dirName) err = MkdirAllAndChown(dirName, 0700, IDPair{UID: rootUID, GID: rootGID}) - assert.NoError(t, err, "Couldn't change ownership of file path. Got error") - assert.True(t, CanAccess(dirName, idMappings.RootPair()), fmt.Sprintf("Unable to access %s directory with user UID:%d and GID:%d", dirName, rootUID, rootGID)) + assert.Check(t, err, "Couldn't change ownership of file path. Got error") + assert.Check(t, CanAccess(dirName, idMappings.RootPair()), fmt.Sprintf("Unable to access %s directory with user UID:%d and GID:%d", dirName, rootUID, rootGID)) } func TestLookupUserAndGroup(t *testing.T) { RequiresRoot(t) uid, gid, err := AddNamespaceRangesUser(tempUser) - assert.NoError(t, err) + assert.Check(t, err) defer delUser(t, tempUser) fetchedUser, err := LookupUser(tempUser) - assert.NoError(t, err) + assert.Check(t, err) fetchedUserByID, err := LookupUID(uid) - assert.NoError(t, err) - assert.Equal(t, fetchedUserByID, fetchedUser) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(fetchedUserByID, fetchedUser)) fetchedGroup, err := LookupGroup(tempUser) - assert.NoError(t, err) + assert.Check(t, err) fetchedGroupByID, err := LookupGID(gid) - assert.NoError(t, err) - assert.Equal(t, fetchedGroupByID, fetchedGroup) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(fetchedGroupByID, fetchedGroup)) } func TestLookupUserAndGroupThatDoesNotExist(t *testing.T) { fakeUser := "fakeuser" _, err := LookupUser(fakeUser) - assert.EqualError(t, err, "getent unable to find entry \""+fakeUser+"\" in passwd 
database") + assert.Check(t, is.Error(err, "getent unable to find entry \""+fakeUser+"\" in passwd database")) _, err = LookupUID(-1) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) fakeGroup := "fakegroup" _, err = LookupGroup(fakeGroup) - assert.EqualError(t, err, "getent unable to find entry \""+fakeGroup+"\" in group database") + assert.Check(t, is.Error(err, "getent unable to find entry \""+fakeGroup+"\" in group database")) _, err = LookupGID(-1) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) } // TestMkdirIsNotDir checks that mkdirAs() function (used by MkdirAll...) @@ -389,7 +389,7 @@ func TestMkdirIsNotDir(t *testing.T) { defer os.Remove(file.Name()) err = mkdirAs(file.Name(), 0755, 0, 0, false, false) - assert.EqualError(t, err, "mkdir "+file.Name()+": not a directory") + assert.Check(t, is.Error(err, "mkdir "+file.Name()+": not a directory")) } func RequiresRoot(t *testing.T) { diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go index e322fdf84..e009ab26f 100644 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go @@ -7,7 +7,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "golang.org/x/net/context" ) @@ -36,7 +37,7 @@ func TestReaderErrWrapperReadOnError(t *testing.T) { called = true }) _, err := wrapper.Read([]byte{}) - assert.EqualError(t, err, "error reader always fail") + assert.Check(t, is.Error(err, "error reader always fail")) if !called { t.Fatalf("readErrWrapper should have call the anonymous function on failure") } diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 368f12894..b9f40d3ef 100644 --- 
a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -40,21 +40,17 @@ type JSONProgress struct { // If true, don't show xB/yB HideCounts bool `json:"hidecounts,omitempty"` Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int } func (p *JSONProgress) String() string { var ( - width = 200 + width = p.width() pbBox string numbersBox string timeLeftBox string ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - if p.Current <= 0 && p.Total <= 0 { return "" } @@ -103,7 +99,7 @@ func (p *JSONProgress) String() string { } if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) + fromStart := p.now().Sub(time.Unix(p.Start, 0)) perEntry := fromStart / time.Duration(p.Current) left := time.Duration(p.Total-p.Current) * perEntry left = (left / time.Second) * time.Second @@ -115,6 +111,28 @@ func (p *JSONProgress) String() string { return pbBox + numbersBox + timeLeftBox } +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + // JSONMessage defines a message struct. It describes // the created time, where it from, status, ID of the // message. It's used for docker events. 
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go index 2bad8a202..f9ead207c 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go @@ -9,89 +9,108 @@ import ( "time" "github.com/docker/docker/pkg/term" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestError(t *testing.T) { je := JSONError{404, "Not found"} - if je.Error() != "Not found" { - t.Fatalf("Expected 'Not found' got '%s'", je.Error()) - } + assert.Assert(t, is.Error(&je, "Not found")) } -func TestProgress(t *testing.T) { - termsz, err := term.GetWinsize(0) - if err != nil { - // we can safely ignore the err here - termsz = nil - } - jp := JSONProgress{} - if jp.String() != "" { - t.Fatalf("Expected empty string, got '%s'", jp.String()) +func TestProgressString(t *testing.T) { + type expected struct { + short string + long string } - expected := " 1B" - jp2 := JSONProgress{Current: 1} - if jp2.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp2.String()) + shortAndLong := func(short, long string) expected { + return expected{short: short, long: long} } - expectedStart := "[==========> ] 20B/100B" - if termsz != nil && termsz.Width <= 110 { - expectedStart = " 20B/100B" - } - jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} - // Just look at the start of the string - // (the remaining time is really hard to test -_-) - if jp3.String()[:len(expectedStart)] != expectedStart { - t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) + start := time.Date(2017, 12, 3, 15, 10, 1, 0, time.UTC) + timeAfter := func(delta time.Duration) func() time.Time { + return func() time.Time { + return start.Add(delta) + } } - expected = 
"[=========================> ] 50B/100B" - if termsz != nil && termsz.Width <= 110 { - expected = " 50B/100B" - } - jp4 := JSONProgress{Current: 50, Total: 100} - if jp4.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp4.String()) + var testcases = []struct { + name string + progress JSONProgress + expected expected + }{ + { + name: "no progress", + }, + { + name: "progress 1", + progress: JSONProgress{Current: 1}, + expected: shortAndLong(" 1B", " 1B"), + }, + { + name: "some progress with a start time", + progress: JSONProgress{ + Current: 20, + Total: 100, + Start: start.Unix(), + nowFunc: timeAfter(time.Second), + }, + expected: shortAndLong( + " 20B/100B 4s", + "[==========> ] 20B/100B 4s", + ), + }, + { + name: "some progress without a start time", + progress: JSONProgress{Current: 50, Total: 100}, + expected: shortAndLong( + " 50B/100B", + "[=========================> ] 50B/100B", + ), + }, + { + name: "current more than total is not negative gh#7136", + progress: JSONProgress{Current: 50, Total: 40}, + expected: shortAndLong( + " 50B", + "[==================================================>] 50B", + ), + }, + { + name: "with units", + progress: JSONProgress{Current: 50, Total: 100, Units: "units"}, + expected: shortAndLong( + "50/100 units", + "[=========================> ] 50/100 units", + ), + }, + { + name: "current more than total with units is not negative ", + progress: JSONProgress{Current: 50, Total: 40, Units: "units"}, + expected: shortAndLong( + "50 units", + "[==================================================>] 50 units", + ), + }, + { + name: "hide counts", + progress: JSONProgress{Current: 50, Total: 100, HideCounts: true}, + expected: shortAndLong( + "", + "[=========================> ] ", + ), + }, } - // this number can't be negative gh#7136 - expected = "[==================================================>] 50B" - if termsz != nil && termsz.Width <= 110 { - expected = " 50B" - } - jp5 := JSONProgress{Current: 50, 
Total: 40} - if jp5.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp5.String()) - } + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + testcase.progress.winSize = 100 + assert.Equal(t, testcase.progress.String(), testcase.expected.short) - expected = "[=========================> ] 50/100 units" - if termsz != nil && termsz.Width <= 110 { - expected = " 50/100 units" - } - jp6 := JSONProgress{Current: 50, Total: 100, Units: "units"} - if jp6.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp6.String()) - } - - // this number can't be negative - expected = "[==================================================>] 50 units" - if termsz != nil && termsz.Width <= 110 { - expected = " 50 units" - } - jp7 := JSONProgress{Current: 50, Total: 40, Units: "units"} - if jp7.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp7.String()) - } - - expected = "[=========================> ] " - if termsz != nil && termsz.Width <= 110 { - expected = "" - } - jp8 := JSONProgress{Current: 50, Total: 100, HideCounts: true} - if jp8.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp8.String()) + testcase.progress.winSize = 200 + assert.Equal(t, testcase.progress.String(), testcase.expected.long) + }) } } @@ -198,7 +217,7 @@ func TestJSONMessageDisplayWithJSONError(t *testing.T) { jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} err = jsonMessage.Display(data, &noTermInfo{}) - assert.EqualError(t, err, "authentication is required") + assert.Check(t, is.Error(err, "authentication is required")) } func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go index d54f032cb..b7b15a1fd 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ 
b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go @@ -2,9 +2,9 @@ package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "fmt" - "unsafe" "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) // VersionInfo holds information about the kernel. @@ -22,41 +22,24 @@ func (k *VersionInfo) String() string { // GetKernelVersion gets the current kernel version. func GetKernelVersion() (*VersionInfo, error) { - var ( - h windows.Handle - dwVersion uint32 - err error - ) - KVI := &VersionInfo{"Unknown", 0, 0, 0} - if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - windows.KEY_READ, - &h); err != nil { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { return KVI, err } - defer windows.RegCloseKey(h) + defer k.Close() - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { + blex, _, err := k.GetStringValue("BuildLabEx") + if err != nil { return KVI, err } - - KVI.kvi = windows.UTF16ToString(buf[:]) + KVI.kvi = blex // Important - docker.exe MUST be manifested for this API to return // the correct information. 
- if dwVersion, err = windows.GetVersion(); err != nil { + dwVersion, err := windows.GetVersion() + if err != nil { return KVI, err } diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go index aec4cce72..372de5146 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go +++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go @@ -12,7 +12,7 @@ func GetOperatingSystem() (string, error) { // Default return value ret := "Unknown Operating System" - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\WIndows NT\CurrentVersion`, registry.QUERY_VALUE) + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) if err != nil { return ret, err } diff --git a/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/vendor/github.com/docker/docker/pkg/plugins/client_test.go index 10c8d8fd5..d420010f1 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/client_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/client_test.go @@ -14,9 +14,9 @@ import ( "github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/tlsconfig" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var ( @@ -88,7 +88,7 @@ func TestEchoInputOutput(t *testing.T) { t.Fatal(err) } - assert.Equal(t, m, output) + assert.Check(t, is.DeepEqual(m, output)) err = c.Call("Test.Echo", nil, nil) if err != nil { t.Fatal(err) @@ -205,7 +205,7 @@ func TestClientStream(t *testing.T) { if err := json.NewDecoder(body).Decode(&output); err != nil { t.Fatalf("Test.Echo: error reading plugin resp: %v", err) } - assert.Equal(t, m, 
output) + assert.Check(t, is.DeepEqual(m, output)) } func TestClientSendFile(t *testing.T) { @@ -233,7 +233,7 @@ func TestClientSendFile(t *testing.T) { if err := c.SendFile("Test.Echo", &buf, &output); err != nil { t.Fatal(err) } - assert.Equal(t, m, output) + assert.Check(t, is.DeepEqual(m, output)) } func TestClientWithRequestTimeout(t *testing.T) { @@ -248,7 +248,7 @@ func TestClientWithRequestTimeout(t *testing.T) { client := &Client{http: srv.Client(), requestFactory: &testRequestWrapper{srv}} _, err := client.callWithRetry("/Plugin.Hello", nil, false, WithRequestTimeout(timeout)) - require.Error(t, err, "expected error") + assert.Assert(t, is.ErrorContains(err, ""), "expected error") err = errors.Cause(err) @@ -256,7 +256,7 @@ func TestClientWithRequestTimeout(t *testing.T) { case *url.Error: err = e.Err } - require.Equal(t, context.DeadlineExceeded, err) + assert.DeepEqual(t, context.DeadlineExceeded, err) } type testRequestWrapper struct { diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go index 9212946b2..2c718d8be 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go @@ -11,7 +11,7 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func TestLocalSocket(t *testing.T) { @@ -91,7 +91,7 @@ func TestScan(t *testing.T) { r := newLocalRegistry() p, err := r.Plugin(name) - require.NoError(t, err) + assert.NilError(t, err) pluginNamesNotEmpty, err := Scan() if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go index 1540a19a6..ca8d59840 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugin_test.go @@ -14,8 +14,8 @@ import ( 
"github.com/docker/docker/pkg/plugins/transport" "github.com/docker/go-connections/tlsconfig" + "github.com/gotestyourself/gotestyourself/assert" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" ) const ( @@ -54,7 +54,6 @@ func testActive(t *testing.T, p *Plugin) { t.Fatalf("%s:%d: deadlock in waitActive", filepath.Base(f), l) case <-done: } - } func TestGet(t *testing.T) { @@ -83,7 +82,6 @@ func TestGet(t *testing.T) { // check negative case where plugin vegetable doesn't exist _, err = Get("vegetable", "potato") assert.Equal(t, errors.Cause(err), ErrNotFound) - } func TestPluginWithNoManifest(t *testing.T) { diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go index 2e48b0fe0..081f60424 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http_test.go @@ -5,7 +5,8 @@ import ( "net/http" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestHTTPTransport(t *testing.T) { @@ -16,5 +17,5 @@ func TestHTTPTransport(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, "POST", request.Method) + assert.Check(t, is.Equal("POST", request.Method)) } diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go index 2dbea36ae..76015169d 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools_test.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools_test.go @@ -7,8 +7,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { @@ -95,16 +95,16 @@ func 
TestBufioWriterPoolPutAndGet(t *testing.T) { buf := new(bytes.Buffer) bw := bufio.NewWriter(buf) writer := BufioWriter32KPool.Get(bw) - require.NotNil(t, writer) + assert.Assert(t, writer != nil) written, err := writer.Write([]byte("foobar")) - require.NoError(t, err) - assert.Equal(t, 6, written) + assert.NilError(t, err) + assert.Check(t, is.Equal(6, written)) // Make sure we Flush all the way ? writer.Flush() bw.Flush() - assert.Len(t, buf.Bytes(), 6) + assert.Check(t, is.Len(buf.Bytes(), 6)) // Reset the buffer buf.Reset() BufioWriter32KPool.Put(writer) diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go b/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go index e6bbe9f9a..90aa01a39 100644 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec_test.go @@ -5,8 +5,7 @@ import ( "os/exec" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) func init() { @@ -19,7 +18,7 @@ func init() { func TestRegister(t *testing.T) { defer func() { if r := recover(); r != nil { - require.Equal(t, `reexec func already registered under name "reexec"`, r) + assert.Equal(t, `reexec func already registered under name "reexec"`, r) } }() Register("reexec", func() {}) @@ -28,13 +27,13 @@ func TestRegister(t *testing.T) { func TestCommand(t *testing.T) { cmd := Command("reexec") w, err := cmd.StdinPipe() - require.NoError(t, err, "Error on pipe creation: %v", err) + assert.NilError(t, err, "Error on pipe creation: %v", err) defer w.Close() err = cmd.Start() - require.NoError(t, err, "Error on re-exec cmd: %v", err) + assert.NilError(t, err, "Error on re-exec cmd: %v", err) err = cmd.Wait() - require.EqualError(t, err, "exit status 2") + assert.Error(t, err, "exit status 2") } func TestNaiveSelf(t *testing.T) { @@ -44,10 +43,10 @@ func TestNaiveSelf(t *testing.T) { cmd := exec.Command(naiveSelf(), 
"-test.run=TestNaiveSelf") cmd.Env = append(os.Environ(), "TEST_CHECK=1") err := cmd.Start() - require.NoError(t, err, "Unable to start command") + assert.NilError(t, err, "Unable to start command") err = cmd.Wait() - require.EqualError(t, err, "exit status 2") + assert.Error(t, err, "exit status 2") os.Args[0] = "mkdir" - assert.NotEqual(t, naiveSelf(), os.Args[0]) + assert.Check(t, naiveSelf() != os.Args[0]) } diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go index d7e8da252..71c577ed6 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux_test.go @@ -8,7 +8,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestCatchAll(t *testing.T) { @@ -34,7 +35,7 @@ func TestCatchAll(t *testing.T) { }() s := <-sigs - assert.EqualValues(t, s.String(), signal.String()) + assert.Check(t, is.Equal(s.String(), signal.String())) } } @@ -50,9 +51,9 @@ func TestStopCatch(t *testing.T) { syscall.Kill(syscall.Getpid(), signal) }() signalString := <-channel - assert.EqualValues(t, signalString.String(), signal.String()) + assert.Check(t, is.Equal(signalString.String(), signal.String())) StopCatch(channel) _, ok := <-channel - assert.EqualValues(t, ok, false) + assert.Check(t, is.Equal(ok, false)) } diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_test.go b/vendor/github.com/docker/docker/pkg/signal/signal_test.go index 1add526d1..bbf3736fc 100644 --- a/vendor/github.com/docker/docker/pkg/signal/signal_test.go +++ b/vendor/github.com/docker/docker/pkg/signal/signal_test.go @@ -4,30 +4,31 @@ import ( "syscall" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is 
"github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestParseSignal(t *testing.T) { _, checkAtoiError := ParseSignal("0") - assert.EqualError(t, checkAtoiError, "Invalid signal: 0") + assert.Check(t, is.Error(checkAtoiError, "Invalid signal: 0")) _, error := ParseSignal("SIG") - assert.EqualError(t, error, "Invalid signal: SIG") + assert.Check(t, is.Error(error, "Invalid signal: SIG")) for sigStr := range SignalMap { responseSignal, error := ParseSignal(sigStr) - assert.NoError(t, error) + assert.Check(t, error) signal := SignalMap[sigStr] - assert.EqualValues(t, signal, responseSignal) + assert.Check(t, is.DeepEqual(signal, responseSignal)) } } func TestValidSignalForPlatform(t *testing.T) { isValidSignal := ValidSignalForPlatform(syscall.Signal(0)) - assert.EqualValues(t, false, isValidSignal) + assert.Check(t, is.Equal(false, isValidSignal)) for _, sigN := range SignalMap { isValidSignal = ValidSignalForPlatform(syscall.Signal(sigN)) - assert.EqualValues(t, true, isValidSignal) + assert.Check(t, is.Equal(true, isValidSignal)) } } diff --git a/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go b/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go index d32a4366f..a3afe7a7b 100644 --- a/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/signal/trap_linux_test.go @@ -10,19 +10,19 @@ import ( "syscall" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func buildTestBinary(t *testing.T, tmpdir string, prefix string) (string, string) { tmpDir, err := ioutil.TempDir(tmpdir, prefix) - require.NoError(t, err) + assert.NilError(t, err) exePath := tmpDir + "/" + prefix wd, _ := os.Getwd() testHelperCode := wd + "/testfiles/main.go" cmd := exec.Command("go", "build", "-o", exePath, testHelperCode) err = cmd.Run() - require.NoError(t, err) + 
assert.NilError(t, err) return exePath, tmpDir } @@ -48,14 +48,14 @@ func TestTrap(t *testing.T) { cmd.Env = append(cmd.Env, "IF_MULTIPLE=1") } err := cmd.Start() - require.NoError(t, err) + assert.NilError(t, err) err = cmd.Wait() if e, ok := err.(*exec.ExitError); ok { code := e.Sys().(syscall.WaitStatus).ExitStatus() if v.multiple { - assert.Equal(t, 128+int(v.signal.(syscall.Signal)), code) + assert.Check(t, is.DeepEqual(128+int(v.signal.(syscall.Signal)), code)) } else { - assert.Equal(t, 99, code) + assert.Check(t, is.Equal(99, code)) } continue } @@ -66,17 +66,17 @@ func TestTrap(t *testing.T) { func TestDumpStacks(t *testing.T) { directory, err := ioutil.TempDir("", "test-dump-tasks") - assert.NoError(t, err) + assert.Check(t, err) defer os.RemoveAll(directory) dumpPath, err := DumpStacks(directory) - assert.NoError(t, err) + assert.Check(t, err) readFile, _ := ioutil.ReadFile(dumpPath) fileData := string(readFile) - assert.Contains(t, fileData, "goroutine") + assert.Check(t, is.Contains(fileData, "goroutine")) } func TestDumpStacksWithEmptyInput(t *testing.T) { path, err := DumpStacks("") - assert.NoError(t, err) - assert.Equal(t, os.Stderr.Name(), path) + assert.Check(t, err) + assert.Check(t, is.Equal(os.Stderr.Name(), path)) } diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go index 7259c54df..172d568bd 100644 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go @@ -8,14 +8,16 @@ import ( "testing" "github.com/docker/docker/pkg/jsonmessage" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func 
TestRawProgressFormatterFormatStatus(t *testing.T) { sf := rawProgressFormatter{} res := sf.formatStatus("ID", "%s%d", "a", 1) - assert.Equal(t, "a1\r\n", string(res)) + assert.Check(t, is.Equal("a1\r\n", string(res))) } func TestRawProgressFormatterFormatProgress(t *testing.T) { @@ -27,28 +29,28 @@ func TestRawProgressFormatterFormatProgress(t *testing.T) { } res := sf.formatProgress("id", "action", jsonProgress, nil) out := string(res) - assert.True(t, strings.HasPrefix(out, "action [====")) - assert.Contains(t, out, "15B/30B") - assert.True(t, strings.HasSuffix(out, "\r")) + assert.Check(t, strings.HasPrefix(out, "action [====")) + assert.Check(t, is.Contains(out, "15B/30B")) + assert.Check(t, strings.HasSuffix(out, "\r")) } func TestFormatStatus(t *testing.T) { res := FormatStatus("ID", "%s%d", "a", 1) expected := `{"status":"a1","id":"ID"}` + streamNewline - assert.Equal(t, expected, string(res)) + assert.Check(t, is.Equal(expected, string(res))) } func TestFormatError(t *testing.T) { res := FormatError(errors.New("Error for formatter")) expected := `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}` + "\r\n" - assert.Equal(t, expected, string(res)) + assert.Check(t, is.Equal(expected, string(res))) } func TestFormatJSONError(t *testing.T) { err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} res := FormatError(err) expected := `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}` + streamNewline - assert.Equal(t, expected, string(res)) + assert.Check(t, is.Equal(expected, string(res))) } func TestJsonProgressFormatterFormatProgress(t *testing.T) { @@ -58,43 +60,44 @@ func TestJsonProgressFormatterFormatProgress(t *testing.T) { Total: 30, Start: 1, } - res := sf.formatProgress("id", "action", jsonProgress, &AuxFormatter{Writer: &bytes.Buffer{}}) + aux := "aux message" + res := sf.formatProgress("id", "action", jsonProgress, aux) msg := &jsonmessage.JSONMessage{} - require.NoError(t, 
json.Unmarshal(res, msg)) - assert.Equal(t, "id", msg.ID) - assert.Equal(t, "action", msg.Status) + assert.NilError(t, json.Unmarshal(res, msg)) - // jsonProgress will always be in the format of: - // [=========================> ] 15B/30B 412910h51m30s - // The last entry '404933h7m11s' is the timeLeftBox. - // However, the timeLeftBox field may change as jsonProgress.String() depends on time.Now(). - // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. - - // Compare the jsonProgress strings before the timeLeftBox - expectedProgress := "[=========================> ] 15B/30B" - // if terminal column is <= 110, expectedProgressShort is expected. - expectedProgressShort := " 15B/30B" - if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || - strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { - t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", - expectedProgress, expectedProgressShort, msg.ProgressMessage) + rawAux := json.RawMessage(`"` + aux + `"`) + expected := &jsonmessage.JSONMessage{ + ID: "id", + Status: "action", + Aux: &rawAux, + Progress: jsonProgress, } + assert.DeepEqual(t, msg, expected, cmpJSONMessageOpt()) +} - assert.Equal(t, jsonProgress, msg.Progress) +func cmpJSONMessageOpt() cmp.Option { + progressMessagePath := func(path cmp.Path) bool { + return path.String() == "ProgressMessage" + } + return cmp.Options{ + cmpopts.IgnoreUnexported(jsonmessage.JSONProgress{}), + // Ignore deprecated property that is a derivative of Progress + cmp.FilterPath(progressMessagePath, cmp.Ignore()), + } } func TestJsonProgressFormatterFormatStatus(t *testing.T) { sf := jsonProgressFormatter{} res := sf.formatStatus("ID", "%s%d", "a", 1) - assert.Equal(t, `{"status":"a1","id":"ID"}`+streamNewline, string(res)) + assert.Check(t, is.Equal(`{"status":"a1","id":"ID"}`+streamNewline, string(res))) } func TestNewJSONProgressOutput(t *testing.T) { b := bytes.Buffer{} b.Write(FormatStatus("id", 
"Downloading")) _ = NewJSONProgressOutput(&b, false) - assert.Equal(t, `{"status":"Downloading","id":"id"}`+streamNewline, b.String()) + assert.Check(t, is.Equal(`{"status":"Downloading","id":"id"}`+streamNewline, b.String())) } func TestAuxFormatterEmit(t *testing.T) { @@ -104,6 +107,6 @@ func TestAuxFormatterEmit(t *testing.T) { Data string }{"Additional data"} err := aux.Emit(sampleAux) - require.NoError(t, err) - assert.Equal(t, `{"aux":{"Data":"Additional data"}}`+streamNewline, b.String()) + assert.NilError(t, err) + assert.Check(t, is.Equal(`{"aux":{"Data":"Additional data"}}`+streamNewline, b.String())) } diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go index 332d66414..b74d6fb2d 100644 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter_test.go @@ -4,8 +4,8 @@ import ( "bytes" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestStreamWriterStdout(t *testing.T) { @@ -14,11 +14,11 @@ func TestStreamWriterStdout(t *testing.T) { sw := NewStdoutWriter(buffer) size, err := sw.Write([]byte(content)) - require.NoError(t, err) - assert.Equal(t, len(content), size) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(content), size)) expected := `{"stream":"content"}` + streamNewline - assert.Equal(t, expected, buffer.String()) + assert.Check(t, is.Equal(expected, buffer.String())) } func TestStreamWriterStderr(t *testing.T) { @@ -27,9 +27,9 @@ func TestStreamWriterStderr(t *testing.T) { sw := NewStderrWriter(buffer) size, err := sw.Write([]byte(content)) - require.NoError(t, err) - assert.Equal(t, len(content), size) + assert.NilError(t, err) + assert.Check(t, is.Equal(len(content), size)) expected 
:= `{"stream":"\u001b[91mcontent\u001b[0m"}` + streamNewline - assert.Equal(t, expected, buffer.String()) + assert.Check(t, is.Equal(expected, buffer.String())) } diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go index a798bf6e8..e8a12a35c 100644 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go @@ -7,18 +7,18 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" "golang.org/x/sys/unix" ) func TestReadProcBool(t *testing.T) { tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmpDir) procFile := filepath.Join(tmpDir, "read-proc-bool") err = ioutil.WriteFile(procFile, []byte("1"), 0644) - require.NoError(t, err) + assert.NilError(t, err) if !readProcBool(procFile) { t.Fatal("expected proc bool to be true, got false") @@ -39,7 +39,7 @@ func TestReadProcBool(t *testing.T) { func TestCgroupEnabled(t *testing.T) { cgroupDir, err := ioutil.TempDir("", "cgroup-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(cgroupDir) if cgroupEnabled(cgroupDir, "test") { @@ -47,7 +47,7 @@ func TestCgroupEnabled(t *testing.T) { } err = ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644) - require.NoError(t, err) + assert.NilError(t, err) if !cgroupEnabled(cgroupDir, "test") { t.Fatal("cgroupEnabled should be true") @@ -56,11 +56,11 @@ func TestCgroupEnabled(t *testing.T) { func TestNew(t *testing.T) { sysInfo := New(false) - require.NotNil(t, sysInfo) + assert.Assert(t, sysInfo != nil) checkSysInfo(t, sysInfo) sysInfo = New(true) - require.NotNil(t, sysInfo) + assert.Assert(t, sysInfo != nil) checkSysInfo(t, sysInfo) } @@ -69,10 +69,10 @@ func checkSysInfo(t *testing.T, sysInfo *SysInfo) { if err := 
unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { // Make sure the kernel has CONFIG_SECCOMP_FILTER. if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { - require.True(t, sysInfo.Seccomp) + assert.Assert(t, sysInfo.Seccomp) } } else { - require.False(t, sysInfo.Seccomp) + assert.Assert(t, !sysInfo.Seccomp) } } @@ -83,7 +83,7 @@ func TestNewAppArmorEnabled(t *testing.T) { } sysInfo := New(true) - require.True(t, sysInfo.AppArmor) + assert.Assert(t, sysInfo.AppArmor) } func TestNewAppArmorDisabled(t *testing.T) { @@ -93,7 +93,7 @@ func TestNewAppArmorDisabled(t *testing.T) { } sysInfo := New(true) - require.False(t, sysInfo.AppArmor) + assert.Assert(t, !sysInfo.AppArmor) } func TestNumCPU(t *testing.T) { diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go index 12687b33c..fd68a9665 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go @@ -7,7 +7,7 @@ import ( "syscall" "testing" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" ) // TestFromStatT tests fromStatT for a tempfile @@ -17,10 +17,10 @@ func TestFromStatT(t *testing.T) { stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) - require.NoError(t, err) + assert.NilError(t, err) s, err := fromStatT(stat) - require.NoError(t, err) + assert.NilError(t, err) if stat.Mode != s.Mode() { t.Fatal("got invalid mode") diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go index 466eda0ae..35f08ebce 100644 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go +++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go @@ -17,8 +17,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + 
"github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) type testLayer struct { @@ -225,13 +225,13 @@ func TestNewTarSumForLabel(t *testing.T) { func TestEmptyTar(t *testing.T) { // Test without gzip. ts, err := emptyTarSum(false) - require.NoError(t, err) + assert.NilError(t, err) zeroBlock := make([]byte, 1024) buf := new(bytes.Buffer) n, err := io.Copy(buf, ts) - require.NoError(t, err) + assert.NilError(t, err) if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) @@ -246,16 +246,16 @@ func TestEmptyTar(t *testing.T) { // Test with gzip. ts, err = emptyTarSum(true) - require.NoError(t, err) + assert.NilError(t, err) buf.Reset() _, err = io.Copy(buf, ts) - require.NoError(t, err) + assert.NilError(t, err) bufgz := new(bytes.Buffer) gz := gzip.NewWriter(bufgz) n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) - require.NoError(t, err) + assert.NilError(t, err) gz.Close() gzBytes := bufgz.Bytes() @@ -275,7 +275,7 @@ func TestEmptyTar(t *testing.T) { } resultSum = ts.Sum(nil) - assert.Equal(t, expectedSum, resultSum) + assert.Check(t, is.Equal(expectedSum, resultSum)) } var ( diff --git a/vendor/github.com/docker/docker/pkg/term/ascii_test.go b/vendor/github.com/docker/docker/pkg/term/ascii_test.go index e426de35b..321d1b87d 100644 --- a/vendor/github.com/docker/docker/pkg/term/ascii_test.go +++ b/vendor/github.com/docker/docker/pkg/term/ascii_test.go @@ -3,23 +3,23 @@ package term // import "github.com/docker/docker/pkg/term" import ( "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestToBytes(t *testing.T) { codes, err := ToBytes("ctrl-a,a") - require.NoError(t, err) - assert.Equal(t, []byte{1, 97}, codes) + assert.NilError(t, err) + assert.Check(t, 
is.DeepEqual([]byte{1, 97}, codes)) _, err = ToBytes("shift-z") - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") - require.NoError(t, err) - assert.Equal(t, []byte{0, 27, 126, 15}, codes) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]byte{0, 27, 126, 15}, codes)) codes, err = ToBytes("DEL,+") - require.NoError(t, err) - assert.Equal(t, []byte{127, 43}, codes) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual([]byte{127, 43}, codes)) } diff --git a/vendor/github.com/docker/docker/pkg/term/proxy_test.go b/vendor/github.com/docker/docker/pkg/term/proxy_test.go index ff40c1bef..759be5145 100644 --- a/vendor/github.com/docker/docker/pkg/term/proxy_test.go +++ b/vendor/github.com/docker/docker/pkg/term/proxy_test.go @@ -3,10 +3,10 @@ package term // import "github.com/docker/docker/pkg/term" import ( "bytes" "fmt" - "reflect" "testing" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestEscapeProxyRead(t *testing.T) { @@ -15,78 +15,75 @@ func TestEscapeProxyRead(t *testing.T) { reader := NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf := make([]byte, len(keys)) nr, err := reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) - require.Equal(t, keys, buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr %d should be equal to the number of %d", nr, len(keys))) + assert.DeepEqual(t, keys, buf) keys, _ = ToBytes("") reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf = make([]byte, len(keys)) nr, err = reader.Read(buf) - require.Error(t, err, "Should throw error when no keys are to read") - require.EqualValues(t, nr, 0, "nr should be zero") - require.Condition(t, func() (success bool) { return 
len(keys) == 0 && len(buf) == 0 }, "keys & the read buffer size should be zero") + assert.Assert(t, is.ErrorContains(err, ""), "Should throw error when no keys are to read") + assert.Equal(t, nr, 0, "nr should be zero") + assert.Check(t, is.Len(keys, 0)) + assert.Check(t, is.Len(buf, 0)) escapeKeys, _ = ToBytes("ctrl-x,ctrl-@") keys, _ = ToBytes("DEL") reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf = make([]byte, len(keys)) nr, err = reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) - require.Equal(t, keys, buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, 1, fmt.Sprintf("nr %d should be equal to the number of 1", nr)) + assert.DeepEqual(t, keys, buf) escapeKeys, _ = ToBytes("ctrl-c") keys, _ = ToBytes("ctrl-c") reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf = make([]byte, len(keys)) nr, err = reader.Read(buf) - require.Condition(t, func() (success bool) { - return reflect.TypeOf(err).Name() == "EscapeError" - }, err) - require.EqualValues(t, nr, 0, "nr should be equal to 0") - require.Equal(t, keys, buf, "keys & the read buffer should be equal") + assert.Error(t, err, "read escape sequence") + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys, buf) escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") keys, _ = ToBytes("ctrl-c,ctrl-z") reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf = make([]byte, 1) nr, err = reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, 0, "nr should be equal to 0") - require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[0:1], buf) nr, err = reader.Read(buf) - require.Condition(t, func() (success bool) { - return reflect.TypeOf(err).Name() == "EscapeError" - }, err) - require.EqualValues(t, nr, 0, "nr should 
be equal to 0") - require.Equal(t, keys[1:], buf, "keys & the read buffer should be equal") + assert.Error(t, err, "read escape sequence") + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[1:], buf) escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") keys, _ = ToBytes("ctrl-c,DEL,+") reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf = make([]byte, 1) nr, err = reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, 0, "nr should be equal to 0") - require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[0:1], buf) buf = make([]byte, len(keys)) nr, err = reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) - require.Equal(t, keys, buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + assert.DeepEqual(t, keys, buf) escapeKeys, _ = ToBytes("ctrl-c,ctrl-z") keys, _ = ToBytes("ctrl-c,DEL") reader = NewEscapeProxy(bytes.NewReader(keys), escapeKeys) buf = make([]byte, 1) nr, err = reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, 0, "nr should be equal to 0") - require.Equal(t, keys[0:1], buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, 0, "nr should be equal to 0") + assert.DeepEqual(t, keys[0:1], buf) buf = make([]byte, len(keys)) nr, err = reader.Read(buf) - require.NoError(t, err) - require.EqualValues(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) - require.Equal(t, keys, buf, "keys & the read buffer should be equal") + assert.NilError(t, err) + assert.Equal(t, nr, len(keys), fmt.Sprintf("nr should be equal to %d", len(keys))) + assert.DeepEqual(t, keys, buf) } diff --git 
a/vendor/github.com/docker/docker/pkg/term/term_linux_test.go b/vendor/github.com/docker/docker/pkg/term/term_linux_test.go index 6e42d3edd..4f1d67586 100644 --- a/vendor/github.com/docker/docker/pkg/term/term_linux_test.go +++ b/vendor/github.com/docker/docker/pkg/term/term_linux_test.go @@ -7,7 +7,8 @@ import ( "os" "testing" - "github.com/stretchr/testify/require" + "github.com/google/go-cmp/cmp" + "github.com/gotestyourself/gotestyourself/assert" ) // RequiresRoot skips tests that require root, unless the test.root flag has @@ -31,85 +32,86 @@ func newTempFile() (*os.File, error) { func TestGetWinsize(t *testing.T) { tty, err := newTtyForTest(t) defer tty.Close() - require.NoError(t, err) + assert.NilError(t, err) winSize, err := GetWinsize(tty.Fd()) - require.NoError(t, err) - require.NotNil(t, winSize) - require.NotNil(t, winSize.Height) - require.NotNil(t, winSize.Width) + assert.NilError(t, err) + assert.Assert(t, winSize != nil) + newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} err = SetWinsize(tty.Fd(), &newSize) - require.NoError(t, err) + assert.NilError(t, err) winSize, err = GetWinsize(tty.Fd()) - require.NoError(t, err) - require.Equal(t, *winSize, newSize) + assert.NilError(t, err) + assert.DeepEqual(t, *winSize, newSize, cmpWinsize) } +var cmpWinsize = cmp.AllowUnexported(Winsize{}) + func TestSetWinsize(t *testing.T) { tty, err := newTtyForTest(t) defer tty.Close() - require.NoError(t, err) + assert.NilError(t, err) winSize, err := GetWinsize(tty.Fd()) - require.NoError(t, err) - require.NotNil(t, winSize) + assert.NilError(t, err) + assert.Assert(t, winSize != nil) newSize := Winsize{Width: 200, Height: 200, x: winSize.x, y: winSize.y} err = SetWinsize(tty.Fd(), &newSize) - require.NoError(t, err) + assert.NilError(t, err) winSize, err = GetWinsize(tty.Fd()) - require.NoError(t, err) - require.Equal(t, *winSize, newSize) + assert.NilError(t, err) + assert.DeepEqual(t, *winSize, newSize, cmpWinsize) } func TestGetFdInfo(t 
*testing.T) { tty, err := newTtyForTest(t) defer tty.Close() - require.NoError(t, err) + assert.NilError(t, err) inFd, isTerminal := GetFdInfo(tty) - require.Equal(t, inFd, tty.Fd()) - require.Equal(t, isTerminal, true) + assert.Equal(t, inFd, tty.Fd()) + assert.Equal(t, isTerminal, true) tmpFile, err := newTempFile() - require.NoError(t, err) + assert.NilError(t, err) defer tmpFile.Close() inFd, isTerminal = GetFdInfo(tmpFile) - require.Equal(t, inFd, tmpFile.Fd()) - require.Equal(t, isTerminal, false) + assert.Equal(t, inFd, tmpFile.Fd()) + assert.Equal(t, isTerminal, false) } func TestIsTerminal(t *testing.T) { tty, err := newTtyForTest(t) defer tty.Close() - require.NoError(t, err) + assert.NilError(t, err) isTerminal := IsTerminal(tty.Fd()) - require.Equal(t, isTerminal, true) + assert.Equal(t, isTerminal, true) tmpFile, err := newTempFile() - require.NoError(t, err) + assert.NilError(t, err) defer tmpFile.Close() isTerminal = IsTerminal(tmpFile.Fd()) - require.Equal(t, isTerminal, false) + assert.Equal(t, isTerminal, false) } func TestSaveState(t *testing.T) { tty, err := newTtyForTest(t) defer tty.Close() - require.NoError(t, err) + assert.NilError(t, err) state, err := SaveState(tty.Fd()) - require.NoError(t, err) - require.NotNil(t, state) + assert.NilError(t, err) + assert.Assert(t, state != nil) tty, err = newTtyForTest(t) - require.NoError(t, err) + assert.NilError(t, err) defer tty.Close() err = RestoreTerminal(tty.Fd(), state) - require.NoError(t, err) + assert.NilError(t, err) } func TestDisableEcho(t *testing.T) { tty, err := newTtyForTest(t) defer tty.Close() - require.NoError(t, err) + assert.NilError(t, err) state, err := SetRawTerminal(tty.Fd()) defer RestoreTerminal(tty.Fd(), state) - require.NoError(t, err) - require.NotNil(t, state) + assert.NilError(t, err) + assert.Assert(t, state != nil) err = DisableEcho(tty.Fd(), state) - require.NoError(t, err) + assert.NilError(t, err) } diff --git 
a/vendor/github.com/docker/docker/plugin/manager_linux.go b/vendor/github.com/docker/docker/plugin/manager_linux.go index 3fc6be4ce..4988f7424 100644 --- a/vendor/github.com/docker/docker/plugin/manager_linux.go +++ b/vendor/github.com/docker/docker/plugin/manager_linux.go @@ -64,6 +64,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { logrus.Warnf("Could not unmount %s: %v", propRoot, err) } } + return errors.WithStack(err) } return pm.pluginPostStart(p, c) } diff --git a/vendor/github.com/docker/docker/plugin/manager_linux_test.go b/vendor/github.com/docker/docker/plugin/manager_linux_test.go index be5f933c1..396839126 100644 --- a/vendor/github.com/docker/docker/plugin/manager_linux_test.go +++ b/vendor/github.com/docker/docker/plugin/manager_linux_test.go @@ -1,6 +1,7 @@ package plugin // import "github.com/docker/docker/plugin" import ( + "io" "io/ioutil" "os" "path/filepath" @@ -10,6 +11,8 @@ import ( "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/system" "github.com/docker/docker/plugin/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" ) func TestManagerWithPluginMounts(t *testing.T) { @@ -77,3 +80,58 @@ func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin { return &p } + +type simpleExecutor struct { +} + +func (e *simpleExecutor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error { + return errors.New("Create failed") +} + +func (e *simpleExecutor) Restore(id string, stdout, stderr io.WriteCloser) error { + return nil +} + +func (e *simpleExecutor) IsRunning(id string) (bool, error) { + return false, nil +} + +func (e *simpleExecutor) Signal(id string, signal int) error { + return nil +} + +func TestCreateFailed(t *testing.T) { + root, err := ioutil.TempDir("", "test-create-failed") + if err != nil { + t.Fatal(err) + } + defer system.EnsureRemoveAll(root) + + s := NewStore() + managerRoot := filepath.Join(root, "manager") + p := 
newTestPlugin(t, "create", "testcreate", managerRoot) + + m, err := NewManager( + ManagerConfig{ + Store: s, + Root: managerRoot, + ExecRoot: filepath.Join(root, "exec"), + CreateExecutor: func(*Manager) (Executor, error) { return &simpleExecutor{}, nil }, + LogPluginEvent: func(_, _, _ string) {}, + }) + if err != nil { + t.Fatal(err) + } + + if err := s.Add(p); err != nil { + t.Fatal(err) + } + + if err := m.enable(p, &controller{}, false); err == nil { + t.Fatalf("expected Create failed error, got %v", err) + } + + if err := m.Remove(p.Name(), &types.PluginRmConfig{ForceRemove: true}); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go index 36ec76ae0..4438670a5 100644 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go +++ b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go @@ -105,7 +105,7 @@ Loop: } if len(call.Excludes.Caps) > 0 { for _, c := range call.Excludes.Caps { - if inSlice(rs.Process.Capabilities.Effective, c) { + if inSlice(rs.Process.Capabilities.Bounding, c) { continue Loop } } @@ -117,7 +117,7 @@ Loop: } if len(call.Includes.Caps) > 0 { for _, c := range call.Includes.Caps { - if !inSlice(rs.Process.Capabilities.Effective, c) { + if !inSlice(rs.Process.Capabilities.Bounding, c) { continue Loop } } diff --git a/vendor/github.com/docker/docker/reference/store_test.go b/vendor/github.com/docker/docker/reference/store_test.go index e423f5db2..24c0597a3 100644 --- a/vendor/github.com/docker/docker/reference/store_test.go +++ b/vendor/github.com/docker/docker/reference/store_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/docker/distribution/reference" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" digest "github.com/opencontainers/go-digest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var ( @@ -64,10 
+64,10 @@ func TestLoad(t *testing.T) { func TestSave(t *testing.T) { jsonFile, err := ioutil.TempFile("", "tag-store-test") - require.NoError(t, err) + assert.NilError(t, err) _, err = jsonFile.Write([]byte(`{}`)) - require.NoError(t, err) + assert.NilError(t, err) jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) @@ -328,23 +328,23 @@ func TestAddDeleteGet(t *testing.T) { func TestInvalidTags(t *testing.T) { tmpDir, err := ioutil.TempDir("", "tag-store-test") - require.NoError(t, err) + assert.NilError(t, err) defer os.RemoveAll(tmpDir) store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) - require.NoError(t, err) + assert.NilError(t, err) id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") // sha256 as repo name ref, err := reference.ParseNormalizedNamed("sha256:abc") - require.NoError(t, err) + assert.NilError(t, err) err = store.AddTag(ref, id, true) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) // setting digest as a tag ref, err = reference.ParseNormalizedNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") - require.NoError(t, err) + assert.NilError(t, err) err = store.AddTag(ref, id, true) - assert.Error(t, err) + assert.Check(t, is.ErrorContains(err, "")) } diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go index 68bd01c48..de5a526b6 100644 --- a/vendor/github.com/docker/docker/registry/config.go +++ b/vendor/github.com/docker/docker/registry/config.go @@ -45,9 +45,6 @@ var ( // IndexName is the name of the index IndexName = "docker.io" - // NotaryServer is the endpoint serving the Notary trust server - NotaryServer = "https://notary.docker.io" - // DefaultV2Registry is the URI of the default v2 registry DefaultV2Registry = &url.URL{ Scheme: "https", diff --git a/vendor/github.com/docker/docker/registry/config_test.go 
b/vendor/github.com/docker/docker/registry/config_test.go index 61b1c26d3..4df9cdb94 100644 --- a/vendor/github.com/docker/docker/registry/config_test.go +++ b/vendor/github.com/docker/docker/registry/config_test.go @@ -6,7 +6,8 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestLoadAllowNondistributableArtifacts(t *testing.T) { @@ -311,9 +312,9 @@ func TestNewServiceConfig(t *testing.T) { for _, testCase := range testCases { _, err := newServiceConfig(testCase.opts) if testCase.errStr != "" { - assert.EqualError(t, err, testCase.errStr) + assert.Check(t, is.Error(err, testCase.errStr)) } else { - assert.Nil(t, err) + assert.Check(t, err) } } } @@ -347,8 +348,8 @@ func TestValidateIndexName(t *testing.T) { for _, testCase := range valid { result, err := ValidateIndexName(testCase.index) - if assert.NoError(t, err) { - assert.Equal(t, testCase.expect, result) + if assert.Check(t, err) { + assert.Check(t, is.Equal(testCase.expect, result)) } } @@ -375,6 +376,6 @@ func TestValidateIndexNameWithError(t *testing.T) { } for _, testCase := range invalid { _, err := ValidateIndexName(testCase.index) - assert.EqualError(t, err, testCase.err) + assert.Check(t, is.Error(err, testCase.err)) } } diff --git a/vendor/github.com/docker/docker/registry/registry_test.go b/vendor/github.com/docker/docker/registry/registry_test.go index 637530146..b4420d558 100644 --- a/vendor/github.com/docker/docker/registry/registry_test.go +++ b/vendor/github.com/docker/docker/registry/registry_test.go @@ -12,7 +12,7 @@ import ( "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" ) var ( @@ -757,12 +757,12 @@ func TestSearchRepositories(t *testing.T) { func 
TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest("GET", url, nil) - assert.False(t, trustedLocation(req)) + assert.Check(t, !trustedLocation(req)) } for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { req, _ := http.NewRequest("GET", url, nil) - assert.True(t, trustedLocation(req)) + assert.Check(t, trustedLocation(req)) } } diff --git a/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go index 9a08e3416..bd3d55885 100644 --- a/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go +++ b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestResumableRequestHeaderSimpleErrors(t *testing.T) { @@ -24,11 +24,11 @@ func TestResumableRequestHeaderSimpleErrors(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - require.NoError(t, err) + assert.NilError(t, err) resreq := &requestReader{} _, err = resreq.Read([]byte{}) - assert.EqualError(t, err, "client and request can't be nil") + assert.Check(t, is.Error(err, "client and request can't be nil")) resreq = &requestReader{ client: client, @@ -36,7 +36,7 @@ func TestResumableRequestHeaderSimpleErrors(t *testing.T) { totalSize: -1, } _, err = resreq.Read([]byte{}) - assert.EqualError(t, err, "failed to auto detect content length") + assert.Check(t, is.Error(err, "failed to auto detect content length")) } // Not too much failures, bails out after some wait @@ -45,7 +45,7 @@ func 
TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) - require.NoError(t, err) + assert.NilError(t, err) resreq := &requestReader{ client: client, @@ -55,8 +55,8 @@ func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { waitDuration: 10 * time.Millisecond, } read, err := resreq.Read([]byte{}) - require.NoError(t, err) - assert.Equal(t, 0, read) + assert.NilError(t, err) + assert.Check(t, is.Equal(0, read)) } // Too much failures, returns the error @@ -65,7 +65,7 @@ func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { var badReq *http.Request badReq, err := http.NewRequest("GET", "I'm not an url", nil) - require.NoError(t, err) + assert.NilError(t, err) resreq := &requestReader{ client: client, @@ -77,8 +77,8 @@ func TestResumableRequestHeaderTooMuchFailures(t *testing.T) { expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""` read, err := resreq.Read([]byte{}) - assert.EqualError(t, err, expectedError) - assert.Equal(t, 0, read) + assert.Check(t, is.Error(err, expectedError)) + assert.Check(t, is.Equal(0, read)) } type errorReaderCloser struct{} @@ -93,7 +93,7 @@ func (errorReaderCloser) Read(p []byte) (n int, err error) { func TestResumableRequestReaderWithReadError(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) - require.NoError(t, err) + assert.NilError(t, err) client := &http.Client{} @@ -116,15 +116,15 @@ func TestResumableRequestReaderWithReadError(t *testing.T) { buf := make([]byte, 1) read, err := resreq.Read(buf) - require.NoError(t, err) + assert.NilError(t, err) - assert.Equal(t, 0, read) + assert.Check(t, is.Equal(0, read)) } func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", "", nil) - require.NoError(t, err) + assert.NilError(t, err) client := &http.Client{} @@ -147,7 +147,7 @@ func 
TestResumableRequestReaderWithEOFWith416Response(t *testing.T) { buf := make([]byte, 1) _, err = resreq.Read(buf) - assert.EqualError(t, err, io.EOF.Error()) + assert.Check(t, is.Error(err, io.EOF.Error())) } func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { @@ -160,7 +160,7 @@ func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - require.NoError(t, err) + assert.NilError(t, err) client := &http.Client{} @@ -173,7 +173,7 @@ func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) { buf := make([]byte, 2) _, err = resreq.Read(buf) - assert.EqualError(t, err, "the server doesn't support byte ranges") + assert.Check(t, is.Error(err, "the server doesn't support byte ranges")) } func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { @@ -186,7 +186,7 @@ func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - require.NoError(t, err) + assert.NilError(t, err) client := &http.Client{} retries := uint32(5) @@ -195,10 +195,10 @@ func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) { defer resreq.Close() data, err := ioutil.ReadAll(resreq) - require.NoError(t, err) + assert.NilError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - assert.Equal(t, srvtxt, resstr) + assert.Check(t, is.Equal(srvtxt, resstr)) } func TestResumableRequestReader(t *testing.T) { @@ -211,7 +211,7 @@ func TestResumableRequestReader(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - require.NoError(t, err) + assert.NilError(t, err) client := &http.Client{} retries := uint32(5) @@ -221,10 +221,10 @@ func TestResumableRequestReader(t *testing.T) { defer resreq.Close() data, err := ioutil.ReadAll(resreq) - require.NoError(t, err) + assert.NilError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - 
assert.Equal(t, srvtxt, resstr) + assert.Check(t, is.Equal(srvtxt, resstr)) } func TestResumableRequestReaderWithInitialResponse(t *testing.T) { @@ -237,21 +237,21 @@ func TestResumableRequestReaderWithInitialResponse(t *testing.T) { var req *http.Request req, err := http.NewRequest("GET", ts.URL, nil) - require.NoError(t, err) + assert.NilError(t, err) client := &http.Client{} retries := uint32(5) imgSize := int64(len(srvtxt)) res, err := client.Do(req) - require.NoError(t, err) + assert.NilError(t, err) resreq := NewRequestReaderWithInitialResponse(client, req, retries, imgSize, res) defer resreq.Close() data, err := ioutil.ReadAll(resreq) - require.NoError(t, err) + assert.NilError(t, err) resstr := strings.TrimSuffix(string(data), "\n") - assert.Equal(t, srvtxt, resstr) + assert.Check(t, is.Equal(srvtxt, resstr)) } diff --git a/vendor/github.com/docker/docker/runconfig/config_test.go b/vendor/github.com/docker/docker/runconfig/config_test.go index 63619fe53..58e3a9f78 100644 --- a/vendor/github.com/docker/docker/runconfig/config_test.go +++ b/vendor/github.com/docker/docker/runconfig/config_test.go @@ -12,8 +12,8 @@ import ( "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) type f struct { @@ -149,21 +149,21 @@ func runDecodeContainerConfigTestCase(testcase decodeConfigTestcase) func(t *tes raw := marshal(t, testcase.wrapper, testcase.doc) config, hostConfig, _, err := decodeContainerConfig(bytes.NewReader(raw)) if testcase.expectedErr != "" { - if !assert.Error(t, err) { + if !assert.Check(t, is.ErrorContains(err, "")) { return } - assert.Contains(t, err.Error(), testcase.expectedErr) + assert.Check(t, is.Contains(err.Error(), testcase.expectedErr)) return } - 
assert.NoError(t, err) - assert.Equal(t, testcase.expectedConfig, config) - assert.Equal(t, testcase.expectedHostConfig, hostConfig) + assert.Check(t, err) + assert.Check(t, is.DeepEqual(testcase.expectedConfig, config)) + assert.Check(t, is.DeepEqual(testcase.expectedHostConfig, hostConfig)) } } func marshal(t *testing.T, w ContainerConfigWrapper, doc string) []byte { b, err := json.Marshal(w) - require.NoError(t, err, "%s: failed to encode config wrapper", doc) + assert.NilError(t, err, "%s: failed to encode config wrapper", doc) return b } diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go index 48d902d4d..d2482fbe7 100644 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_test.go +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_test.go @@ -10,7 +10,8 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" - "github.com/stretchr/testify/assert" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) // TODO Windows: This will need addressing for a Windows daemon. 
@@ -83,12 +84,12 @@ func TestIpcModeTest(t *testing.T) { } for ipcMode, state := range ipcModes { - assert.Equal(t, state.private, ipcMode.IsPrivate(), "IpcMode.IsPrivate() parsing failed for %q", ipcMode) - assert.Equal(t, state.host, ipcMode.IsHost(), "IpcMode.IsHost() parsing failed for %q", ipcMode) - assert.Equal(t, state.container, ipcMode.IsContainer(), "IpcMode.IsContainer() parsing failed for %q", ipcMode) - assert.Equal(t, state.shareable, ipcMode.IsShareable(), "IpcMode.IsShareable() parsing failed for %q", ipcMode) - assert.Equal(t, state.valid, ipcMode.Valid(), "IpcMode.Valid() parsing failed for %q", ipcMode) - assert.Equal(t, state.ctrName, ipcMode.Container(), "IpcMode.Container() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.private, ipcMode.IsPrivate()), "IpcMode.IsPrivate() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.host, ipcMode.IsHost()), "IpcMode.IsHost() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.container, ipcMode.IsContainer()), "IpcMode.IsContainer() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.shareable, ipcMode.IsShareable()), "IpcMode.IsShareable() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.valid, ipcMode.Valid()), "IpcMode.Valid() parsing failed for %q", ipcMode) + assert.Check(t, is.Equal(state.ctrName, ipcMode.Container()), "IpcMode.Container() parsing failed for %q", ipcMode) } } @@ -195,7 +196,7 @@ func TestDecodeHostConfig(t *testing.T) { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } - assert.False(t, c.Privileged) + assert.Check(t, !c.Privileged) if l := len(c.Binds); l != 1 { t.Fatalf("Expected 1 bind, found %d\n", l) diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index 766142e67..faf4f9c04 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -2,7 +2,6 @@ github.com/Azure/go-ansiterm 
d6e3b3328b783f23731bc4d058875b0371ff8109 github.com/Microsoft/hcsshim v0.6.8 github.com/Microsoft/go-winio v0.4.6 -github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a @@ -19,14 +18,13 @@ golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6 golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 -github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/pmezard/go-difflib v1.0.0 -github.com/gotestyourself/gotestyourself 511344eed30e4384f010579a593dfb442033a692 -github.com/google/go-cmp v0.1.0 +github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029 +github.com/google/go-cmp v0.2.0 github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 github.com/imdario/mergo 0.2.1 -golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0 +golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8 github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2 @@ -34,7 +32,7 @@ github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2 #get libnetwork packages # When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly -github.com/docker/libnetwork ed2130d117c11c542327b4d5216a5db36770bc65 +github.com/docker/libnetwork 1b91bc94094ecfdae41daa465cc0c8df37dfb3dd github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec diff --git 
a/vendor/github.com/docker/docker/volume/store/db.go b/vendor/github.com/docker/docker/volume/store/db.go index fe6dbae9a..5a280ca2d 100644 --- a/vendor/github.com/docker/docker/volume/store/db.go +++ b/vendor/github.com/docker/docker/volume/store/db.go @@ -4,6 +4,7 @@ import ( "encoding/json" "github.com/boltdb/bolt" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -28,7 +29,10 @@ func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { if err != nil { return err } - b := tx.Bucket(volumeBucketName) + b, err := tx.CreateBucketIfNotExists(volumeBucketName) + if err != nil { + return errors.Wrap(err, "error creating volume bucket") + } return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") } @@ -42,8 +46,11 @@ func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) { func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { b := tx.Bucket(volumeBucketName) + if b == nil { + return errdefs.NotFound(errors.New("volume bucket does not exist")) + } val := b.Get([]byte(name)) - if string(val) == "" { + if len(val) == 0 { return nil } if err := json.Unmarshal(val, meta); err != nil { diff --git a/vendor/github.com/docker/docker/volume/store/db_test.go b/vendor/github.com/docker/docker/volume/store/db_test.go new file mode 100644 index 000000000..0a2727e74 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/db_test.go @@ -0,0 +1,52 @@ +package store + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/boltdb/bolt" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" +) + +func TestSetGetMeta(t *testing.T) { + t.Parallel() + + dir, err := ioutil.TempDir("", "test-set-get") + assert.NilError(t, err) + defer os.RemoveAll(dir) + + db, err := bolt.Open(filepath.Join(dir, "db"), 0600, &bolt.Options{Timeout: 1 * time.Second}) + assert.NilError(t, err) + + store := 
&VolumeStore{db: db} + + _, err = store.getMeta("test") + assert.Assert(t, is.ErrorContains(err, "")) + + err = db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket(volumeBucketName) + return err + }) + assert.NilError(t, err) + + meta, err := store.getMeta("test") + assert.NilError(t, err) + assert.DeepEqual(t, volumeMetadata{}, meta) + + testMeta := volumeMetadata{ + Name: "test", + Driver: "fake", + Labels: map[string]string{"a": "1", "b": "2"}, + Options: map[string]string{"foo": "bar"}, + } + err = store.setMeta("test", testMeta) + assert.NilError(t, err) + + meta, err = store.getMeta("test") + assert.NilError(t, err) + assert.DeepEqual(t, testMeta, meta) +} diff --git a/vendor/github.com/docker/docker/volume/store/restore_test.go b/vendor/github.com/docker/docker/volume/store/restore_test.go new file mode 100644 index 000000000..680735a38 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/store/restore_test.go @@ -0,0 +1,55 @@ +package store + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/docker/docker/volume" + volumedrivers "github.com/docker/docker/volume/drivers" + volumetestutils "github.com/docker/docker/volume/testutils" + "github.com/gotestyourself/gotestyourself/assert" +) + +func TestRestore(t *testing.T) { + t.Parallel() + + dir, err := ioutil.TempDir("", "test-restore") + assert.NilError(t, err) + defer os.RemoveAll(dir) + + driverName := "test-restore" + volumedrivers.Register(volumetestutils.NewFakeDriver(driverName), driverName) + defer volumedrivers.Unregister("test-restore") + + s, err := New(dir) + assert.NilError(t, err) + defer s.Shutdown() + + _, err = s.Create("test1", driverName, nil, nil) + assert.NilError(t, err) + + testLabels := map[string]string{"a": "1"} + testOpts := map[string]string{"foo": "bar"} + _, err = s.Create("test2", driverName, testOpts, testLabels) + assert.NilError(t, err) + + s.Shutdown() + + s, err = New(dir) + assert.NilError(t, err) + + v, err := s.Get("test1") + assert.NilError(t, 
err) + + dv := v.(volume.DetailedVolume) + var nilMap map[string]string + assert.DeepEqual(t, nilMap, dv.Options()) + assert.DeepEqual(t, nilMap, dv.Labels()) + + v, err = s.Get("test2") + assert.NilError(t, err) + dv = v.(volume.DetailedVolume) + assert.DeepEqual(t, testOpts, dv.Options()) + assert.DeepEqual(t, testLabels, dv.Labels()) +} diff --git a/vendor/github.com/docker/docker/volume/store/store.go b/vendor/github.com/docker/docker/volume/store/store.go index 643096a78..70dd8b2d8 100644 --- a/vendor/github.com/docker/docker/volume/store/store.go +++ b/vendor/github.com/docker/docker/volume/store/store.go @@ -29,7 +29,10 @@ type volumeWrapper struct { } func (v volumeWrapper) Options() map[string]string { - options := map[string]string{} + if v.options == nil { + return nil + } + options := make(map[string]string, len(v.options)) for key, value := range v.options { options[key] = value } @@ -37,7 +40,15 @@ func (v volumeWrapper) Options() map[string]string { } func (v volumeWrapper) Labels() map[string]string { - return v.labels + if v.labels == nil { + return nil + } + + labels := make(map[string]string, len(v.labels)) + for key, value := range v.labels { + labels[key] = value + } + return labels } func (v volumeWrapper) Scope() string { diff --git a/vendor/github.com/docker/docker/volume/store/store_test.go b/vendor/github.com/docker/docker/volume/store/store_test.go index 54b7d7cfc..faf4035e2 100644 --- a/vendor/github.com/docker/docker/volume/store/store_test.go +++ b/vendor/github.com/docker/docker/volume/store/store_test.go @@ -12,6 +12,9 @@ import ( "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" volumetestutils "github.com/docker/docker/volume/testutils" + "github.com/google/go-cmp/cmp" + "github.com/gotestyourself/gotestyourself/assert" + is "github.com/gotestyourself/gotestyourself/assert/cmp" ) func TestCreate(t *testing.T) { @@ -291,6 +294,7 @@ func TestDefererencePluginOnCreateError(t *testing.T) { pg := 
volumetestutils.NewFakePluginGetter(p) volumedrivers.RegisterPluginGetter(pg) + defer volumedrivers.RegisterPluginGetter(nil) dir, err := ioutil.TempDir("", "test-plugin-deref-err") if err != nil { @@ -320,3 +324,105 @@ func TestDefererencePluginOnCreateError(t *testing.T) { t.Fatalf("expected 1 plugin reference, got: %d", refs) } } + +func TestRefDerefRemove(t *testing.T) { + t.Parallel() + + driverName := "test-ref-deref-remove" + s, cleanup := setupTest(t, driverName) + defer cleanup(t) + + v, err := s.CreateWithRef("test", driverName, "test-ref", nil, nil) + assert.NilError(t, err) + + err = s.Remove(v) + assert.Assert(t, is.ErrorContains(err, "")) + assert.Equal(t, errVolumeInUse, err.(*OpErr).Err) + + s.Dereference(v, "test-ref") + err = s.Remove(v) + assert.NilError(t, err) +} + +func TestGet(t *testing.T) { + t.Parallel() + + driverName := "test-get" + s, cleanup := setupTest(t, driverName) + defer cleanup(t) + + _, err := s.Get("not-exist") + assert.Assert(t, is.ErrorContains(err, "")) + assert.Equal(t, errNoSuchVolume, err.(*OpErr).Err) + + v1, err := s.Create("test", driverName, nil, map[string]string{"a": "1"}) + assert.NilError(t, err) + + v2, err := s.Get("test") + assert.NilError(t, err) + assert.DeepEqual(t, v1, v2, cmpVolume) + + dv := v2.(volume.DetailedVolume) + assert.Equal(t, "1", dv.Labels()["a"]) + + err = s.Remove(v1) + assert.NilError(t, err) +} + +func TestGetWithRef(t *testing.T) { + t.Parallel() + + driverName := "test-get-with-ref" + s, cleanup := setupTest(t, driverName) + defer cleanup(t) + + _, err := s.GetWithRef("not-exist", driverName, "test-ref") + assert.Assert(t, is.ErrorContains(err, "")) + + v1, err := s.Create("test", driverName, nil, map[string]string{"a": "1"}) + assert.NilError(t, err) + + v2, err := s.GetWithRef("test", driverName, "test-ref") + assert.NilError(t, err) + assert.DeepEqual(t, v1, v2, cmpVolume) + + err = s.Remove(v2) + assert.Assert(t, is.ErrorContains(err, "")) + assert.Equal(t, errVolumeInUse, 
err.(*OpErr).Err) + + s.Dereference(v2, "test-ref") + err = s.Remove(v2) + assert.NilError(t, err) +} + +var cmpVolume = cmp.AllowUnexported(volumetestutils.FakeVolume{}, volumeWrapper{}) + +func setupTest(t *testing.T, name string) (*VolumeStore, func(*testing.T)) { + t.Helper() + s, cleanup := newTestStore(t) + + volumedrivers.Register(volumetestutils.NewFakeDriver(name), name) + return s, func(t *testing.T) { + cleanup(t) + volumedrivers.Unregister(name) + } +} + +func newTestStore(t *testing.T) (*VolumeStore, func(*testing.T)) { + t.Helper() + + dir, err := ioutil.TempDir("", "store-root") + assert.NilError(t, err) + + cleanup := func(t *testing.T) { + err := os.RemoveAll(dir) + assert.Check(t, err) + } + + s, err := New(dir) + assert.Check(t, err) + return s, func(t *testing.T) { + s.Shutdown() + cleanup(t) + } +} diff --git a/vendor/github.com/fnproject/fdk-go/fdk_test.go b/vendor/github.com/fnproject/fdk-go/fdk_test.go index 2100d4c06..115d20381 100644 --- a/vendor/github.com/fnproject/fdk-go/fdk_test.go +++ b/vendor/github.com/fnproject/fdk-go/fdk_test.go @@ -17,7 +17,7 @@ import ( "github.com/fnproject/fdk-go/utils" ) -func echoHTTPHandler(ctx context.Context, in io.Reader, out io.Writer) { +func echoHTTPHandler(_ context.Context, in io.Reader, out io.Writer) { io.Copy(out, in) WriteStatus(out, http.StatusTeapot+2) SetHeader(out, "yo", "dawg") @@ -67,19 +67,18 @@ func JSONHandler(_ context.Context, in io.Reader, out io.Writer) { } } -func JSONWithStatusCode(_ context.Context, in io.Reader, out io.Writer) { +func JSONWithStatusCode(_ context.Context, _ io.Reader, out io.Writer) { SetHeader(out, "Content-Type", "application/json") WriteStatus(out, 201) } func TestJSON(t *testing.T) { req := &utils.JsonIn{ - `{"name":"john"}`, - "application/json", - "someid", - "2018-01-30T16:52:39.786Z", - "sync", - utils.CallRequestHTTP{ + CallID: "someid", + Body: `{"name":"john"}`, + ContentType: "application/json", + Deadline: "2018-01-30T16:52:39.786Z", + Protocol: 
utils.CallRequestHTTP{ Type: "http", RequestURL: "someURL", Headers: http.Header{}, @@ -146,12 +145,11 @@ func TestJSONEOF(t *testing.T) { func TestJSONOverwriteStatusCodeAndHeaders(t *testing.T) { var out, buf bytes.Buffer req := &utils.JsonIn{ - `{"name":"john"}`, - "application/json", - "someid", - "2018-01-30T16:52:39.786Z", - "sync", - utils.CallRequestHTTP{ + CallID: "someid", + Body: `{"name":"john"}`, + ContentType: "application/json", + Deadline: "2018-01-30T16:52:39.786Z", + Protocol: utils.CallRequestHTTP{ Type: "json", RequestURL: "someURL", Headers: http.Header{}, diff --git a/vendor/github.com/fnproject/fdk-go/utils/utils.go b/vendor/github.com/fnproject/fdk-go/utils/utils.go index ed9dc85e7..96910b87a 100644 --- a/vendor/github.com/fnproject/fdk-go/utils/utils.go +++ b/vendor/github.com/fnproject/fdk-go/utils/utils.go @@ -101,11 +101,10 @@ type CallRequestHTTP struct { } type JsonIn struct { - Body string `json:"body"` - ContentType string `json:"content_type"` CallID string `json:"call_id"` Deadline string `json:"deadline"` - Type string `json:"type"` + Body string `json:"body"` + ContentType string `json:"content_type"` Protocol CallRequestHTTP `json:"protocol"` } diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml index d0f383a26..2ee3ab975 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -1,6 +1,8 @@ language: go go: -- 1.7 +- "1.8" +- "1.9" +- "1.10" install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/go-openapi/swag diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml index 5b31a1b3e..7a261a651 100644 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -1,6 +1,8 @@ language: go go: -- 1.7 +- "1.8" +- "1.9" +- "1.10" install: - go 
get -u github.com/stretchr/testify/assert - go get -u github.com/PuerkitoBio/purell diff --git a/vendor/github.com/go-openapi/jsonreference/reference_test.go b/vendor/github.com/go-openapi/jsonreference/reference_test.go index 499c634c5..2bfadcede 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference_test.go +++ b/vendor/github.com/go-openapi/jsonreference/reference_test.go @@ -308,7 +308,7 @@ func TestFileScheme(t *testing.T) { } if r1.IsCanonical() != true { - t.Errorf("New(%v)::IsCanonical %v expect %v", in1, r1.IsCanonical, true) + t.Errorf("New(%v)::IsCanonical %v expect %v", in1, r1.IsCanonical(), true) } result, err := r1.Inherits(r2) diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go new file mode 100644 index 000000000..f83254515 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/keepalive.go @@ -0,0 +1,53 @@ +package client + +import ( + "io" + "io/ioutil" + "net/http" + "sync/atomic" +) + +// KeepAliveTransport drains the remaining body from a response +// so that go will reuse the TCP connections. +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as a http client middleware that can be used to override +// any request. 
+func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { + return &keepAliveTransport{wrapped: rt} +} + +type keepAliveTransport struct { + wrapped http.RoundTripper +} + +func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := k.wrapped.RoundTrip(r) + if err != nil { + return resp, err + } + resp.Body = &drainingReadCloser{rdr: resp.Body} + return resp, nil +} + +type drainingReadCloser struct { + rdr io.ReadCloser + seenEOF uint32 +} + +func (d *drainingReadCloser) Read(p []byte) (n int, err error) { + n, err = d.rdr.Read(p) + if err == io.EOF || n == 0 { + atomic.StoreUint32(&d.seenEOF, 1) + } + return +} + +func (d *drainingReadCloser) Close() error { + // drain buffer + if atomic.LoadUint32(&d.seenEOF) != 1 { + //#nosec + io.Copy(ioutil.Discard, d.rdr) + } + return d.rdr.Close() +} diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive_test.go b/vendor/github.com/go-openapi/runtime/client/keepalive_test.go new file mode 100644 index 000000000..4ac035854 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client/keepalive_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "io" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" +) + +func newCountingReader(rdr io.Reader, readOnce bool) *countingReadCloser { + return &countingReadCloser{ + rdr: rdr, + readOnce: readOnce, + } +} + +type countingReadCloser struct { + rdr io.Reader + readOnce bool + readCalled int + closeCalled int +} + +func (c *countingReadCloser) Read(b []byte) (int, error) { + c.readCalled++ + if c.readCalled > 1 && c.readOnce { + return 0, io.EOF + } + return c.rdr.Read(b) +} + +func (c *countingReadCloser) Close() error { + c.closeCalled++ + return nil +} + +func TestDrainingReadCloser(t *testing.T) { + rdr := newCountingReader(bytes.NewBufferString("There are many things to do"), false) + prevDisc := ioutil.Discard + disc := bytes.NewBuffer(nil) + ioutil.Discard = disc + defer func() { 
ioutil.Discard = prevDisc }() + + buf := make([]byte, 5) + ts := &drainingReadCloser{rdr: rdr} + ts.Read(buf) + ts.Close() + assert.Equal(t, "There", string(buf)) + assert.Equal(t, " are many things to do", disc.String()) + assert.Equal(t, 3, rdr.readCalled) + assert.Equal(t, 1, rdr.closeCalled) +} + +func TestDrainingReadCloser_SeenEOF(t *testing.T) { + rdr := newCountingReader(bytes.NewBufferString("There are many things to do"), true) + prevDisc := ioutil.Discard + disc := bytes.NewBuffer(nil) + ioutil.Discard = disc + defer func() { ioutil.Discard = prevDisc }() + + buf := make([]byte, 5) + ts := &drainingReadCloser{rdr: rdr} + ts.Read(buf) + _, err := ts.Read(nil) + assert.Equal(t, io.EOF, err) + ts.Close() + assert.Equal(t, string(buf), "There") + assert.Equal(t, disc.String(), "") + assert.Equal(t, 2, rdr.readCalled) + assert.Equal(t, 1, rdr.closeCalled) +} diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go index 2e60ccfb0..e272a71b2 100644 --- a/vendor/github.com/go-openapi/runtime/client/request.go +++ b/vendor/github.com/go-openapi/runtime/client/request.go @@ -74,6 +74,14 @@ var ( _ runtime.ClientRequest = new(request) ) +func (r *request) isMultipart(mediaType string) bool { + if len(r.fileFields) > 0 { + return true + } + + return runtime.MultipartFormMime == mediaType +} + // BuildHTTP creates a new http request based on the data from the params func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) { return r.buildHTTP(mediaType, basePath, producers, registry, nil) @@ -111,7 +119,7 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run r.buf = bytes.NewBuffer(nil) if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 { body = ioutil.NopCloser(r.buf) - if ((runtime.MultipartFormMime == mediaType || runtime.URLencodedFormMime == mediaType) && 
len(r.formFields) > 0) || r.fileFields != nil { + if r.isMultipart(mediaType) { pr, pw = io.Pipe() body = pr } @@ -127,58 +135,58 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run // check if this is a form type request if len(r.formFields) > 0 || len(r.fileFields) > 0 { - // check if this is multipart - if runtime.MultipartFormMime == mediaType || runtime.URLencodedFormMime == mediaType || len(r.fileFields) > 0 { - mp := multipart.NewWriter(pw) - req.Header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) - - go func() { - defer func() { - mp.Close() - pw.Close() - }() - - for fn, v := range r.formFields { - for _, vi := range v { - if err := mp.WriteField(fn, vi); err != nil { - pw.CloseWithError(err) - log.Println(err) - } - } - } - - defer func() { - for _, ff := range r.fileFields { - for _, ffi := range ff { - ffi.Close() - } - } - }() - for fn, f := range r.fileFields { - for _, fi := range f { - wrtr, err := mp.CreateFormFile(fn, filepath.Base(fi.Name())) - if err != nil { - pw.CloseWithError(err) - log.Println(err) - } - if _, err := io.Copy(wrtr, fi); err != nil { - pw.CloseWithError(err) - log.Println(err) - } - } - } - - }() + if !r.isMultipart(mediaType) { + req.Header.Set(runtime.HeaderContentType, mediaType) + formString := r.formFields.Encode() + // set content length before writing to the buffer + req.ContentLength = int64(len(formString)) + // write the form values as the body + r.buf.WriteString(formString) return req, nil } - req.Header.Set(runtime.HeaderContentType, mediaType) - formString := r.formFields.Encode() - // set content length before writing to the buffer - req.ContentLength = int64(len(formString)) - // write the form values as the body - r.buf.WriteString(formString) + mp := multipart.NewWriter(pw) + req.Header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) + + go func() { + defer func() { + mp.Close() + pw.Close() + }() + + for fn, v := range 
r.formFields { + for _, vi := range v { + if err := mp.WriteField(fn, vi); err != nil { + pw.CloseWithError(err) + log.Println(err) + } + } + } + + defer func() { + for _, ff := range r.fileFields { + for _, ffi := range ff { + ffi.Close() + } + } + }() + for fn, f := range r.fileFields { + for _, fi := range f { + wrtr, err := mp.CreateFormFile(fn, filepath.Base(fi.Name())) + if err != nil { + pw.CloseWithError(err) + log.Println(err) + } + if _, err := io.Copy(wrtr, fi); err != nil { + pw.CloseWithError(err) + log.Println(err) + } + } + } + + }() return req, nil + } // if there is payload, use the producer to write the payload, and then diff --git a/vendor/github.com/go-openapi/runtime/client/request_test.go b/vendor/github.com/go-openapi/runtime/client/request_test.go index 99f9bb839..e6ee6acf0 100644 --- a/vendor/github.com/go-openapi/runtime/client/request_test.go +++ b/vendor/github.com/go-openapi/runtime/client/request_test.go @@ -268,6 +268,29 @@ func TestBuildRequest_BuildHTTP_Form(t *testing.T) { } } +func TestBuildRequest_BuildHTTP_Form_URLEncoded(t *testing.T) { + reqWrtr := runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { + _ = req.SetFormParam("something", "some value") + _ = req.SetQueryParam("hello", "world") + _ = req.SetPathParam("id", "1234") + _ = req.SetHeaderParam("X-Rate-Limit", "200") + return nil + }) + r, _ := newRequest("GET", "/flats/{id}/", reqWrtr) + _ = r.SetHeaderParam(runtime.HeaderContentType, runtime.URLencodedFormMime) + + req, err := r.BuildHTTP(runtime.URLencodedFormMime, "", testProducers, nil) + if assert.NoError(t, err) && assert.NotNil(t, req) { + assert.Equal(t, "200", req.Header.Get("x-rate-limit")) + assert.Equal(t, runtime.URLencodedFormMime, req.Header.Get(runtime.HeaderContentType)) + assert.Equal(t, "world", req.URL.Query().Get("hello")) + assert.Equal(t, "/flats/1234/", req.URL.Path) + expected := []byte("something=some+value") + actual, _ := ioutil.ReadAll(req.Body) + 
assert.Equal(t, expected, actual) + } +} + func TestBuildRequest_BuildHTTP_Form_Content_Length(t *testing.T) { reqWrtr := runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { _ = req.SetFormParam("something", "some value") diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go index 29d2fe929..bae9c3c5c 100644 --- a/vendor/github.com/go-openapi/runtime/client/runtime.go +++ b/vendor/github.com/go-openapi/runtime/client/runtime.go @@ -276,6 +276,34 @@ func (r *Runtime) selectScheme(schemes []string) string { } return scheme } +func transportOrDefault(left, right http.RoundTripper) http.RoundTripper { + if left == nil { + return right + } + return left +} + +// EnableConnectionReuse drains the remaining body from a response +// so that go will reuse the TCP connections. +// +// This is not enabled by default because there are servers where +// the response never gets closed and that would make the code hang forever. +// So instead it's provided as a http client middleware that can be used to override +// any request. 
+func (r *Runtime) EnableConnectionReuse() { + if r.client == nil { + r.Transport = KeepAliveTransport( + transportOrDefault(r.Transport, http.DefaultTransport), + ) + return + } + + r.client.Transport = KeepAliveTransport( + transportOrDefault(r.client.Transport, + transportOrDefault(r.Transport, http.DefaultTransport), + ), + ) +} // Submit a request and when there is a body on success it will turn that into the result // all other things are turned into an api error for swagger which retains the status code diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index cebb5cc59..d8ce4eec1 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -679,6 +679,12 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba if t != nil { parentRefs = append(parentRefs, normalizedRef.String()) + var err error + resolver, err = transitiveResolver(basePath, target.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return nil, err + } + return expandSchema(*t, parentRefs, resolver, normalizedBasePath) } } @@ -814,6 +820,13 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if pathItem.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, pathItem.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } pathItem.Ref = Ref{} parentRefs = parentRefs[0:] @@ -874,6 +887,24 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } +func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*schemaLoader, error) { + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + // Set a new root to resolve against + if 
!strings.HasPrefix(currentRef.String(), baseRef.String()) { + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := resolver.cache.Get(rootURL.String()) + var err error + resolver, err = defaultSchemaLoader(root, resolver.options, resolver.cache) + if err != nil { + return nil, err + } + } + + return resolver, nil +} + // ExpandResponse expands a response based on a basepath // This is the exported version of expandResponse // all refs inside response will be resolved relative to basePath @@ -922,6 +953,13 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string) if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if response.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, response.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } response.Ref = Ref{} parentRefs = parentRefs[0:] @@ -984,6 +1022,13 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath stri if err := derefParameter(parameter, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if parameter.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, parameter.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } parameter.Ref = Ref{} parentRefs = parentRefs[0:] diff --git a/vendor/github.com/go-openapi/spec/expander_test.go b/vendor/github.com/go-openapi/spec/expander_test.go index 82a6affe8..0116e3395 100644 --- a/vendor/github.com/go-openapi/spec/expander_test.go +++ b/vendor/github.com/go-openapi/spec/expander_test.go @@ -1170,6 +1170,25 @@ func TestResolveLocalRef_Response(t *testing.T) { } } +func TestResolveForTransitiveRefs(t *testing.T) { + var spec *Swagger + rawSpec, err := ioutil.ReadFile("fixtures/specs/todos.json") + assert.NoError(t, err) + + basePath, err := 
absPath("fixtures/specs/todos.json") + assert.NoError(t, err) + + opts := &ExpandOptions{ + RelativeBase: basePath, + } + + err = json.Unmarshal(rawSpec, &spec) + assert.NoError(t, err) + + err = ExpandSpec(spec, opts) + assert.NoError(t, err) +} + // PetStoreJSONMessage json raw message for Petstore20 var PetStoreJSONMessage = json.RawMessage([]byte(PetStore20)) diff --git a/vendor/github.com/go-openapi/spec/fixtures/specs/todos.common.json b/vendor/github.com/go-openapi/spec/fixtures/specs/todos.common.json new file mode 100644 index 000000000..1c43908a6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/fixtures/specs/todos.common.json @@ -0,0 +1,103 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0", + "title": "To-do Demo", + "description": + "### Notes:\n\nThis OAS2 (Swagger 2) specification defines common models and responses, that other specifications may reference.\n\nFor example, check out the user poperty in the main.oas2 todo-partial model - it references the user model in this specification!\n\nLikewise, the main.oas2 operations reference the shared error responses in this common specification.", + "contact": { + "name": "Stoplight", + "url": "https://stoplight.io" + }, + "license": { + "name": "MIT" + } + }, + "host": "example.com", + "securityDefinitions": {}, + "paths": {}, + "responses": { + "401": { + "description": "", + "schema": { + "$ref": "#/definitions/error-response" + }, + "examples": { + "application/json": { + "status": "401", + "error": "Not Authorized" + } + } + }, + "403": { + "description": "", + "schema": { + "$ref": "#/definitions/error-response" + }, + "examples": { + "application/json": { + "status": "403", + "error": "Forbbiden" + } + } + }, + "404": { + "description": "", + "schema": { + "$ref": "#/definitions/error-response" + }, + "examples": { + "application/json": { + "status": "404", + "error": "Not Found" + } + } + }, + "500": { + "description": "", + "schema": { + "$ref": "#/definitions/error-response" + }, + 
"examples": { + "application/json": { + "status": "500", + "error": "Server Error" + } + } + } + }, + "definitions": { + "user": { + "title": "User", + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "The user's full name." + }, + "age": { + "type": "number", + "minimum": 0, + "maximum": 150 + }, + "error": { + "$ref": "#/definitions/error-response" + } + }, + "required": ["name", "age"] + }, + "error-response": { + "type": "object", + "title": "Error Response", + "properties": { + "status": { + "type": "string" + }, + "error": { + "type": "string" + } + }, + "required": ["status", "error"] + } + } +} diff --git a/vendor/github.com/go-openapi/spec/fixtures/specs/todos.json b/vendor/github.com/go-openapi/spec/fixtures/specs/todos.json new file mode 100644 index 000000000..b9460bdc4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/fixtures/specs/todos.json @@ -0,0 +1,346 @@ +{ + "swagger": "2.0", + "info": { + "version": "1.0", + "title": "To-do Demo", + "description": "This OAS2 (Swagger 2) file represents a real API that lives at http://todos.stoplight.io.\n\nFor authentication information, click the apikey security scheme in the editor sidebar.", + "contact": { + "name": "Stoplight", + "url": "https://stoplight.io" + }, + "license": { + "name": "MIT" + } + }, + "host": "todos.stoplight.io", + "schemes": ["http"], + "consumes": ["application/json"], + "produces": ["application/json"], + "securityDefinitions": { + "Basic": { + "type": "basic" + }, + "API Key": { + "type": "apiKey", + "name": "apikey", + "in": "query" + } + }, + "paths": { + "/todos/{todoId}": { + "parameters": [{ + "name": "todoId", + "in": "path", + "required": true, + "type": "string" + }], + "get": { + "operationId": "GET_todo", + "summary": "Get Todo", + "tags": ["Todos"], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/todo-full" + }, + "examples": { + "application/json": { + "id": 1, + "name": "get food", + 
"completed": false, + "completed_at": "1955-04-23T13:22:52.685Z", + "created_at": "1994-11-05T03:26:51.471Z", + "updated_at": "1989-07-29T11:30:06.701Z" + }, + "/todos/foobar": "{\n\t\"foo\": \"bar\"\n}\n", + "/todos/chores": { + "id": 9000, + "name": "Do Chores", + "completed": false, + "created_at": "2014-08-28T14:14:28.494Z", + "updated_at": "2014-08-28T14:14:28.494Z" + }, + "new": { + "name": "esse qui proident labore", + "completed": null, + "id": 920778, + "completed_at": "2014-01-07T07:49:55.123Z", + "created_at": "1948-04-21T12:04:21.282Z", + "updated_at": "1951-12-19T11:10:34.039Z", + "user": { + "name": "irure deserunt fugiat", + "age": 121.45395681110494 + }, + "float": -47990796.228164576 + } + } + }, + "404": { + "$ref": "./todos.common.json#/responses/404" + }, + "500": { + "$ref": "./todos.common.json#/responses/500" + } + }, + "parameters": [{ + "in": "query", + "name": "", + "type": "string" + }] + }, + "put": { + "operationId": "PUT_todos", + "summary": "Update Todo", + "tags": ["Todos"], + "parameters": [{ + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/todo-partial", + "example": { + "name": "my todo's new name", + "completed": false + } + } + }], + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/todo-full" + }, + "examples": { + "application/json": { + "id": 9000, + "name": "It's Over 9000!!!", + "completed": true, + "completed_at": null, + "created_at": "2014-08-28T14:14:28.494Z", + "updated_at": "2015-08-28T14:14:28.494Z" + } + } + }, + "401": { + "$ref": "./todos.common.json#/responses/401" + }, + "404": { + "$ref": "./todos.common.json#/responses/404" + }, + "500": { + "$ref": "./todos.common.json#/responses/500" + } + }, + "security": [{ + "Basic": [] + }, + { + "API Key": [] + } + ] + }, + "delete": { + "operationId": "DELETE_todo", + "summary": "Delete Todo", + "tags": ["Todos"], + "responses": { + "204": { + "description": "" + }, + "401": { + "$ref": 
"./todos.common.json#/responses/401" + }, + "404": { + "$ref": "./todos.common.json#/responses/404" + }, + "500": { + "$ref": "./todos.common.json#/responses/500" + } + }, + "security": [{ + "Basic": [] + }, + { + "API Key": [] + } + ] + } + }, + "/todos": { + "post": { + "operationId": "POST_todos", + "summary": "Create Todo", + "tags": ["Todos"], + "parameters": [{ + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/todo-partial", + "example": { + "name": "my todo's name", + "completed": false + } + } + }], + "responses": { + "201": { + "description": "", + "schema": { + "$ref": "#/definitions/todo-full" + }, + "examples": { + "application/json": { + "id": 9000, + "name": "It's Over 9000!!!", + "completed": null, + "completed_at": null, + "created_at": "2014-08-28T14:14:28.494Z", + "updated_at": "2014-08-28T14:14:28.494Z" + }, + "/todos/chores": { + "id": 9000, + "name": "Do Chores", + "completed": false, + "created_at": "2014-08-28T14:14:28.494Z", + "updated_at": "2014-08-28T14:14:28.494Z" + } + } + }, + "401": { + "$ref": "./todos.common.json#/responses/401" + }, + "500": { + "$ref": "./todos.common.json#/responses/500" + } + }, + "security": [{ + "API Key": [] + }, + { + "Basic": [] + } + ], + "description": "This creates a Todo object.\n\nTesting `inline code`." 
+ }, + "get": { + "operationId": "GET_todos", + "summary": "List Todos", + "tags": ["Todos"], + "parameters": [{ + "$ref": "#/parameters/limit" + }, + { + "$ref": "#/parameters/skip" + } + ], + "responses": { + "200": { + "description": "", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/todo-full" + } + }, + "examples": { + "application/json": [{ + "id": 1, + "name": "design the thingz", + "completed": true + }, + { + "id": 2, + "name": "mock the thingz", + "completed": true + }, + { + "id": 3, + "name": "code the thingz", + "completed": false + } + ], + "empty": [] + }, + "headers": { + "foo": { + "type": "string", + "default": "bar" + } + } + }, + "500": { + "$ref": "./todos.common.json#/responses/500" + } + }, + "description": "​" + } + } + }, + "parameters": { + "limit": { + "name": "limit", + "in": "query", + "description": "This is how it works.", + "required": false, + "type": "integer", + "maximum": 100 + }, + "skip": { + "name": "skip", + "in": "query", + "required": false, + "type": "string" + } + }, + "definitions": { + "todo-partial": { + "title": "Todo Partial", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "completed": { + "type": ["boolean", "null"] + } + }, + "required": ["name", "completed"] + }, + "todo-full": { + "title": "Todo Full", + "allOf": [{ + "$ref": "#/definitions/todo-partial" + }, + { + "type": "object", + "properties": { + "id": { + "type": "integer", + "minimum": 0, + "maximum": 1000000 + }, + "completed_at": { + "type": ["string", "null"], + "format": "date-time" + }, + "created_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "user": { + "$ref": "./todos.common.json#/definitions/user" + } + }, + "required": ["id", "user"] + } + ] + } + }, + "tags": [{ + "name": "Todos" + }] +} diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index 3e9c3e619..d5a4f9861 
100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -33,8 +33,7 @@ func init() { // IsBSONObjectID returns true when the string is a valid BSON.ObjectId func IsBSONObjectID(str string) bool { - var id bson.ObjectId - return id.UnmarshalText([]byte(str)) == nil + return bson.IsObjectIdHex(str) } // ObjectId represents a BSON object ID (alias to gopkg.in/mgo.v2/bson.ObjectId) @@ -54,12 +53,7 @@ func (id *ObjectId) MarshalText() ([]byte, error) { // UnmarshalText hydrates this instance from text func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on - var rawID bson.ObjectId - if err := rawID.UnmarshalText(data); err != nil { - return err - } - - *id = ObjectId(rawID) + *id = ObjectId(bson.ObjectIdHex(string(data))) return nil } diff --git a/vendor/github.com/lib/pq/.travis.sh b/vendor/github.com/lib/pq/.travis.sh index ead01df73..a297dc452 100755 --- a/vendor/github.com/lib/pq/.travis.sh +++ b/vendor/github.com/lib/pq/.travis.sh @@ -71,12 +71,6 @@ postgresql_uninstall() { } megacheck_install() { - # Megacheck is Go 1.6+, so skip if Go 1.5. - if [[ "$(go version)" =~ "go1.5" ]] - then - echo "megacheck not supported, skipping installation" - return 0 - fi # Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous # new error messages in old code. go get -d honnef.co/go/tools/... @@ -86,12 +80,6 @@ megacheck_install() { } golint_install() { - # Golint is Go 1.6+, so skip if Go 1.5. 
- if [[ "$(go version)" =~ "go1.5" ]] - then - echo "golint not supported, skipping installation" - return 0 - fi go get github.com/golang/lint/golint } diff --git a/vendor/github.com/lib/pq/.travis.yml b/vendor/github.com/lib/pq/.travis.yml index 79c59a81d..18556e089 100644 --- a/vendor/github.com/lib/pq/.travis.yml +++ b/vendor/github.com/lib/pq/.travis.yml @@ -1,10 +1,9 @@ language: go go: - - 1.6.x - - 1.7.x - 1.8.x - 1.9.x + - 1.10.x - master sudo: true @@ -15,7 +14,7 @@ env: - PQGOSSLTESTS=1 - PQSSLCERTTEST_PATH=$PWD/certs - PGHOST=127.0.0.1 - - MEGACHECK_VERSION=2017.2.1 + - MEGACHECK_VERSION=2017.2.2 matrix: - PGVERSION=10 - PGVERSION=9.6 @@ -45,13 +44,7 @@ script: - > goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }' - go vet ./... - # For compatibility with Go 1.5, launch only if megacheck is present. - - > - which megacheck > /dev/null && megacheck -go 1.5 ./... - || echo 'megacheck is not supported, skipping check' - # For compatibility with Go 1.5, launch only if golint is present. - - > - which golint > /dev/null && golint ./... - || echo 'golint is not supported, skipping check' + - megacheck -go 1.8 ./... + - golint ./... - PQTEST_BINARY_PARAMETERS=no go test -race -v ./... - PQTEST_BINARY_PARAMETERS=yes go test -race -v ./... 
diff --git a/vendor/github.com/lib/pq/bench_test.go b/vendor/github.com/lib/pq/bench_test.go index e71f41d06..33d7a02f0 100644 --- a/vendor/github.com/lib/pq/bench_test.go +++ b/vendor/github.com/lib/pq/bench_test.go @@ -5,6 +5,7 @@ package pq import ( "bufio" "bytes" + "context" "database/sql" "database/sql/driver" "io" @@ -156,7 +157,7 @@ func benchMockQuery(b *testing.B, c *conn, query string) { b.Fatal(err) } defer stmt.Close() - rows, err := stmt.Query(nil) + rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil) if err != nil { b.Fatal(err) } @@ -266,7 +267,7 @@ func BenchmarkMockPreparedSelectSeries(b *testing.B) { } func benchPreparedMockQuery(b *testing.B, c *conn, stmt driver.Stmt) { - rows, err := stmt.Query(nil) + rows, err := stmt.(driver.StmtQueryContext).QueryContext(context.Background(), nil) if err != nil { b.Fatal(err) } diff --git a/vendor/github.com/lib/pq/conn_test.go b/vendor/github.com/lib/pq/conn_test.go index 7c0f30eb7..e654b85be 100644 --- a/vendor/github.com/lib/pq/conn_test.go +++ b/vendor/github.com/lib/pq/conn_test.go @@ -1,6 +1,7 @@ package pq import ( + "context" "database/sql" "database/sql/driver" "fmt" @@ -1263,8 +1264,8 @@ func TestParseComplete(t *testing.T) { // Test interface conformance. 
var ( - _ driver.Execer = (*conn)(nil) - _ driver.Queryer = (*conn)(nil) + _ driver.ExecerContext = (*conn)(nil) + _ driver.QueryerContext = (*conn)(nil) ) func TestNullAfterNonNull(t *testing.T) { @@ -1609,10 +1610,10 @@ func TestRowsResultTag(t *testing.T) { t.Fatal(err) } defer conn.Close() - q := conn.(driver.Queryer) + q := conn.(driver.QueryerContext) for _, test := range tests { - if rows, err := q.Query(test.query, nil); err != nil { + if rows, err := q.QueryContext(context.Background(), test.query, nil); err != nil { t.Fatalf("%s: %s", test.query, err) } else { r := rows.(ResultTag) diff --git a/vendor/github.com/lib/pq/copy_test.go b/vendor/github.com/lib/pq/copy_test.go index c1a3cd7fa..a888a8948 100644 --- a/vendor/github.com/lib/pq/copy_test.go +++ b/vendor/github.com/lib/pq/copy_test.go @@ -4,6 +4,7 @@ import ( "bytes" "database/sql" "database/sql/driver" + "net" "strings" "testing" ) @@ -400,15 +401,19 @@ func TestCopyRespLoopConnectionError(t *testing.T) { if err == nil { t.Fatalf("expected error") } - pge, ok := err.(*Error) - if !ok { + switch pge := err.(type) { + case *Error: + if pge.Code.Name() != "admin_shutdown" { + t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) + } + case *net.OpError: + // ignore + default: if err == driver.ErrBadConn { // likely an EPIPE } else { - t.Fatalf("expected *pq.Error or driver.ErrBadConn, got %+#v", err) + t.Fatalf("unexpected error, got %+#v", err) } - } else if pge.Code.Name() != "admin_shutdown" { - t.Fatalf("expected admin_shutdown, got %s", pge.Code.Name()) } _ = stmt.Close() diff --git a/vendor/github.com/lib/pq/encode_test.go b/vendor/github.com/lib/pq/encode_test.go index 634a05c66..d58798a46 100644 --- a/vendor/github.com/lib/pq/encode_test.go +++ b/vendor/github.com/lib/pq/encode_test.go @@ -4,7 +4,7 @@ import ( "bytes" "database/sql" "fmt" - "strings" + "regexp" "testing" "time" @@ -304,24 +304,27 @@ func TestInfinityTimestamp(t *testing.T) { var err error var resultT time.Time - 
expectedErrorStrPrefix := `sql: Scan error on column index 0: unsupported` + expectedErrorStrRegexp := regexp.MustCompile( + `^sql: Scan error on column index 0(, name "timestamp(tz)?"|): unsupported`) + type testCases []struct { - Query string - Param string - ExpectedErrStrPrefix string - ExpectedVal interface{} + Query string + Param string + ExpectedErrorStrRegexp *regexp.Regexp + ExpectedVal interface{} } tc := testCases{ - {"SELECT $1::timestamp", "-infinity", expectedErrorStrPrefix, "-infinity"}, - {"SELECT $1::timestamptz", "-infinity", expectedErrorStrPrefix, "-infinity"}, - {"SELECT $1::timestamp", "infinity", expectedErrorStrPrefix, "infinity"}, - {"SELECT $1::timestamptz", "infinity", expectedErrorStrPrefix, "infinity"}, + {"SELECT $1::timestamp", "-infinity", expectedErrorStrRegexp, "-infinity"}, + {"SELECT $1::timestamptz", "-infinity", expectedErrorStrRegexp, "-infinity"}, + {"SELECT $1::timestamp", "infinity", expectedErrorStrRegexp, "infinity"}, + {"SELECT $1::timestamptz", "infinity", expectedErrorStrRegexp, "infinity"}, } // try to assert []byte to time.Time for _, q := range tc { err = db.QueryRow(q.Query, q.Param).Scan(&resultT) - if !strings.HasPrefix(err.Error(), q.ExpectedErrStrPrefix) { - t.Errorf("Scanning -/+infinity, expected error to have prefix %q, got %q", q.ExpectedErrStrPrefix, err) + if !q.ExpectedErrorStrRegexp.MatchString(err.Error()) { + t.Errorf("Scanning -/+infinity, expected error to match regexp %q, got %q", + q.ExpectedErrorStrRegexp, err) } } // yield []byte diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go index 6928d9670..2afbc9c98 100644 --- a/vendor/github.com/lib/pq/error.go +++ b/vendor/github.com/lib/pq/error.go @@ -488,7 +488,8 @@ func (c *conn) errRecover(err *error) { *err = v } case *net.OpError: - *err = driver.ErrBadConn + c.bad = true + *err = v case error: if v == io.EOF || v.(error).Error() == "remote error: handshake failure" { *err = driver.ErrBadConn diff --git 
a/vendor/github.com/lib/pq/go18_test.go b/vendor/github.com/lib/pq/go18_test.go index 4bf6391ef..1a88a5b44 100644 --- a/vendor/github.com/lib/pq/go18_test.go +++ b/vendor/github.com/lib/pq/go18_test.go @@ -228,7 +228,9 @@ func TestContextCancelBegin(t *testing.T) { cancel() if err != nil { t.Fatal(err) - } else if err := tx.Rollback(); err != nil && err != sql.ErrTxDone { + } else if err := tx.Rollback(); err != nil && + err.Error() != "pq: canceling statement due to user request" && + err != sql.ErrTxDone { t.Fatal(err) } }() diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go index 304e081fe..947d189f4 100644 --- a/vendor/github.com/lib/pq/notify.go +++ b/vendor/github.com/lib/pq/notify.go @@ -784,7 +784,7 @@ func (l *Listener) listenerConnLoop() { } l.emitEvent(ListenerEventDisconnected, err) - time.Sleep(nextReconnect.Sub(time.Now())) + time.Sleep(time.Until(nextReconnect)) } } diff --git a/vendor/github.com/mailru/easyjson/gen/decoder.go b/vendor/github.com/mailru/easyjson/gen/decoder.go index b24b11ce2..3c8f8f8d0 100644 --- a/vendor/github.com/mailru/easyjson/gen/decoder.go +++ b/vendor/github.com/mailru/easyjson/gen/decoder.go @@ -180,7 +180,7 @@ func (g *Generator) genTypeDecoderNoCheck(t reflect.Type, out string, tags field fmt.Fprintln(g.out, ws+" for !in.IsDelim(']') {") fmt.Fprintln(g.out, ws+" if "+iterVar+" < "+fmt.Sprint(length)+" {") - if err := g.genTypeDecoder(elem, out+"["+iterVar+"]", tags, indent+3); err != nil { + if err := g.genTypeDecoder(elem, "("+out+")["+iterVar+"]", tags, indent+3); err != nil { return err } diff --git a/vendor/github.com/mailru/easyjson/gen/encoder.go b/vendor/github.com/mailru/easyjson/gen/encoder.go index 2775647b2..293a66a47 100644 --- a/vendor/github.com/mailru/easyjson/gen/encoder.go +++ b/vendor/github.com/mailru/easyjson/gen/encoder.go @@ -175,7 +175,7 @@ func (g *Generator) genTypeEncoderNoCheck(t reflect.Type, in string, tags fieldT fmt.Fprintln(g.out, ws+" out.RawByte(',')") 
fmt.Fprintln(g.out, ws+" }") - if err := g.genTypeEncoder(elem, in+"["+iVar+"]", tags, indent+1, false); err != nil { + if err := g.genTypeEncoder(elem, "("+in+")["+iVar+"]", tags, indent+1, false); err != nil { return err } diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index 18d65cd5a..0fd9b122f 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -1088,7 +1088,7 @@ func (r *Lexer) JsonNumber() json.Number { } if !r.Ok() { r.errInvalidToken("json.Number") - return json.Number("0") + return json.Number("") } switch r.token.kind { @@ -1096,9 +1096,12 @@ func (r *Lexer) JsonNumber() json.Number { return json.Number(r.String()) case tokenNumber: return json.Number(r.Raw()) + case tokenNull: + r.Null() + return json.Number("") default: r.errSyntax() - return json.Number("0") + return json.Number("") } } diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go b/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go index 4ce4abe6a..529a270b8 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer_test.go @@ -25,9 +25,9 @@ func TestString(t *testing.T) { {toParse: `"test"junk`, want: "test"}, - {toParse: `5`, wantError: true}, // not a string - {toParse: `"\x"`, wantError: true}, // invalid escape - {toParse: `"\ud800"`, want: "�"}, // invalid utf-8 char; return replacement char + {toParse: `5`, wantError: true}, // not a string + {toParse: `"\x"`, wantError: true}, // invalid escape + {toParse: `"\ud800"`, want: "�"}, // invalid utf-8 char; return replacement char } { l := Lexer{Data: []byte(test.toParse)} @@ -269,16 +269,19 @@ func TestJsonNumber(t *testing.T) { {toParse: `"0.12"`, want: json.Number("0.12"), wantValue: 0.12}, {toParse: `"25E-4"`, want: json.Number("25E-4"), wantValue: 25E-4}, - {toParse: `"a""`, wantValueError: true}, + {toParse: `"foo"`, 
want: json.Number("foo"), wantValueError: true}, + {toParse: `null`, want: json.Number(""), wantValueError: true}, - {toParse: `[1]`, wantLexerError: true}, - {toParse: `{}`, wantLexerError: true}, - {toParse: `a`, wantLexerError: true}, + {toParse: `"a""`, want: json.Number("a"), wantValueError: true}, + + {toParse: `[1]`, want: json.Number(""), wantLexerError: true, wantValueError: true}, + {toParse: `{}`, want: json.Number(""), wantLexerError: true, wantValueError: true}, + {toParse: `a`, want: json.Number(""), wantLexerError: true, wantValueError: true}, } { l := Lexer{Data: []byte(test.toParse)} got := l.JsonNumber() - if got != test.want && !test.wantLexerError && !test.wantValueError { + if got != test.want { t.Errorf("[%d, %q] JsonNumber() = %v; want %v", i, test.toParse, got, test.want) } @@ -303,7 +306,7 @@ func TestJsonNumber(t *testing.T) { } if valueErr != nil && !test.wantValueError { - t.Errorf("[%d, %q] JsonNumber() value error: %v", i, test.toParse, err) + t.Errorf("[%d, %q] JsonNumber() value error: %v", i, test.toParse, valueErr) } else if valueErr == nil && test.wantValueError { t.Errorf("[%d, %q] JsonNumber() ok; want value error", i, test.toParse) } diff --git a/vendor/github.com/mailru/easyjson/tests/basic_test.go b/vendor/github.com/mailru/easyjson/tests/basic_test.go index 64649c47c..28f0fdf25 100644 --- a/vendor/github.com/mailru/easyjson/tests/basic_test.go +++ b/vendor/github.com/mailru/easyjson/tests/basic_test.go @@ -49,6 +49,7 @@ var testCases = []struct { {&mapUint64StringValue, mapUint64StringValueString}, {&mapUintptrStringValue, mapUintptrStringValueString}, {&intKeyedMapStructValue, intKeyedMapStructValueString}, + {&intArrayStructValue, intArrayStructValueString}, } func TestMarshal(t *testing.T) { diff --git a/vendor/github.com/mailru/easyjson/tests/data.go b/vendor/github.com/mailru/easyjson/tests/data.go index f6d6653c0..8d5132d51 100644 --- a/vendor/github.com/mailru/easyjson/tests/data.go +++ 
b/vendor/github.com/mailru/easyjson/tests/data.go @@ -766,3 +766,21 @@ var intKeyedMapStructValueString = `{` + `"foo":{"42":"life"},` + `"bar":{"32":{"354634382":"life"}}` + `}` + +type IntArray [2]int + +//easyjson:json +type IntArrayStruct struct { + Pointer *IntArray `json:"pointer"` + Value IntArray `json:"value"` +} + +var intArrayStructValue = IntArrayStruct{ + Pointer: &IntArray{1, 2}, + Value: IntArray{1, 2}, +} + +var intArrayStructValueString = `{` + + `"pointer":[1,2],` + + `"value":[1,2]` + + `}` diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go index dbec638e5..742e57547 100644 --- a/vendor/github.com/prometheus/common/route/route.go +++ b/vendor/github.com/prometheus/common/route/route.go @@ -46,6 +46,10 @@ func (r *Router) WithPrefix(prefix string) *Router { // handle turns a HandlerFunc into an httprouter.Handle. func (r *Router) handle(handlerName string, h http.HandlerFunc) httprouter.Handle { + if r.instrh != nil { + // This needs to be outside the closure to avoid data race when reading and writing to 'h'. + h = r.instrh(handlerName, h) + } return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -53,9 +57,6 @@ func (r *Router) handle(handlerName string, h http.HandlerFunc) httprouter.Handl for _, p := range params { ctx = context.WithValue(ctx, param(p.Key), p.Value) } - if r.instrh != nil { - h = r.instrh(handlerName, h) - } h(w, req.WithContext(ctx)) } } diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go index e2185b782..651bf6819 100644 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package nfsd implements parsing of /proc/net/rpc/nfsd. 
+// Package nfs implements parsing of /proc/net/rpc/nfsd. // Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ package nfs @@ -136,8 +136,8 @@ type ClientV4Stats struct { Setattr uint64 FsInfo uint64 Renew uint64 - SetClientId uint64 - SetClientIdConfirm uint64 + SetClientID uint64 + SetClientIDConfirm uint64 Lock uint64 Lockt uint64 Locku uint64 @@ -156,13 +156,13 @@ type ClientV4Stats struct { ReadDir uint64 ServerCaps uint64 DelegReturn uint64 - GetAcl uint64 - SetAcl uint64 + GetACL uint64 + SetACL uint64 FsLocations uint64 ReleaseLockowner uint64 Secinfo uint64 FsidPresent uint64 - ExchangeId uint64 + ExchangeID uint64 CreateSession uint64 DestroySession uint64 Sequence uint64 @@ -173,11 +173,11 @@ type ClientV4Stats struct { LayoutCommit uint64 LayoutReturn uint64 SecinfoNoName uint64 - TestStateId uint64 - FreeStateId uint64 + TestStateID uint64 + FreeStateID uint64 GetDeviceList uint64 BindConnToSession uint64 - DestroyClientId uint64 + DestroyClientID uint64 Seek uint64 Allocate uint64 DeAllocate uint64 @@ -238,7 +238,7 @@ type V4Ops struct { RelLockOwner uint64 } -// RPCStats models all stats from /proc/net/rpc/nfs. +// ClientRPCStats models all stats from /proc/net/rpc/nfs. 
type ClientRPCStats struct { Network Network ClientRPC ClientRPC diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go index 8f568f011..95a83cc5b 100644 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -204,8 +204,8 @@ func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { Setattr: v[10], FsInfo: v[11], Renew: v[12], - SetClientId: v[13], - SetClientIdConfirm: v[14], + SetClientID: v[13], + SetClientIDConfirm: v[14], Lock: v[15], Lockt: v[16], Locku: v[17], @@ -224,13 +224,13 @@ func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { ReadDir: v[30], ServerCaps: v[31], DelegReturn: v[32], - GetAcl: v[33], - SetAcl: v[34], + GetACL: v[33], + SetACL: v[34], FsLocations: v[35], ReleaseLockowner: v[36], Secinfo: v[37], FsidPresent: v[38], - ExchangeId: v[39], + ExchangeID: v[39], CreateSession: v[40], DestroySession: v[41], Sequence: v[42], @@ -241,11 +241,11 @@ func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { LayoutCommit: v[47], LayoutReturn: v[48], SecinfoNoName: v[49], - TestStateId: v[50], - FreeStateId: v[51], + TestStateID: v[50], + FreeStateID: v[51], GetDeviceList: v[52], BindConnToSession: v[53], - DestroyClientId: v[54], + DestroyClientID: v[54], Seek: v[55], Allocate: v[56], DeAllocate: v[57], diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go index 372f9be3d..8ebcfd16e 100644 --- a/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go @@ -108,8 +108,8 @@ proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 Setattr: 73, FsInfo: 68, Renew: 83, - SetClientId: 12, - SetClientIdConfirm: 84, + SetClientID: 12, + SetClientIDConfirm: 84, Lock: 39, Lockt: 68, Locku: 59, @@ -128,13 +128,13 @@ proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 
58 88 29 74 69 96 21 ReadDir: 66, ServerCaps: 56, DelegReturn: 97, - GetAcl: 36, - SetAcl: 49, + GetACL: 36, + SetACL: 49, FsLocations: 32, ReleaseLockowner: 85, Secinfo: 81, FsidPresent: 11, - ExchangeId: 58, + ExchangeID: 58, CreateSession: 32, DestroySession: 67, Sequence: 13, @@ -145,11 +145,11 @@ proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 LayoutCommit: 26, LayoutReturn: 1337, SecinfoNoName: 0, - TestStateId: 0, - FreeStateId: 0, + TestStateID: 0, + FreeStateID: 0, GetDeviceList: 0, BindConnToSession: 0, - DestroyClientId: 0, + DestroyClientID: 0, Seek: 0, Allocate: 0, DeAllocate: 0, @@ -234,8 +234,8 @@ proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 Setattr: 0, FsInfo: 0, Renew: 0, - SetClientId: 1, - SetClientIdConfirm: 1, + SetClientID: 1, + SetClientIDConfirm: 1, Lock: 0, Lockt: 0, Locku: 0, @@ -254,13 +254,13 @@ proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ReadDir: 0, ServerCaps: 0, DelegReturn: 0, - GetAcl: 0, - SetAcl: 0, + GetACL: 0, + SetACL: 0, FsLocations: 0, ReleaseLockowner: 0, Secinfo: 0, FsidPresent: 0, - ExchangeId: 0, + ExchangeID: 0, CreateSession: 0, DestroySession: 0, Sequence: 0, @@ -271,11 +271,11 @@ proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 LayoutCommit: 0, LayoutReturn: 0, SecinfoNoName: 0, - TestStateId: 0, - FreeStateId: 0, + TestStateID: 0, + FreeStateID: 0, GetDeviceList: 0, BindConnToSession: 0, - DestroyClientId: 0, + DestroyClientID: 0, Seek: 0, Allocate: 0, DeAllocate: 0, diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml index 685bbb105..56c5d095b 100644 --- a/vendor/go.opencensus.io/.travis.yml +++ b/vendor/go.opencensus.io/.travis.yml @@ -2,8 +2,7 @@ language: go go: # 1.8 is tested by AppVeyor - - 1.9.x - - master + - 1.10.x go_import_path: go.opencensus.io diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index 
575673ff7..c637a6709 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -34,27 +34,29 @@ Currently, OpenCensus supports: * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +Start with instrumenting HTTP and gRPC clients and servers, +then add additional custom instrumentation if needed. + +* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http) +* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc) + + ## Tags Tags represent propagated key-value pairs. They are propagated using context.Context in the same process or can be encoded to be transmitted on the wire and decoded back to a tag.Map at the destination. -### Getting a key by a name - -A key is defined by its name. To use a key, a user needs to know its name and type. - -[embedmd]:# (tags.go stringKey) -```go -// Get a key to represent user OS. -key, err := tag.NewKey("my.org/keys/user-os") -if err != nil { - log.Fatal(err) -} -``` - -### Creating tags and propagating them - Package tag provides a builder to create tag maps and put it into the current context. To propagate a tag map to downstream methods and RPCs, New @@ -63,15 +65,6 @@ If there is already a tag map in the current context, it will be replaced. 
[embedmd]:# (tags.go new) ```go -osKey, err := tag.NewKey("my.org/keys/user-os") -if err != nil { - log.Fatal(err) -} -userIDKey, err := tag.NewKey("my.org/keys/user-id") -if err != nil { - log.Fatal(err) -} - ctx, err = tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), @@ -81,49 +74,20 @@ if err != nil { } ``` -### Propagating a tag map in a context - -If you have access to a tag.Map, you can also -propagate it in the current context: - -[embedmd]:# (tags.go newContext) -```go -m := tag.FromContext(ctx) -``` - -In order to update existing tags from the current context, -use New and pass the returned context. - -[embedmd]:# (tags.go replaceTagMap) -```go -ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Upsert(userIDKey, "fff0989878"), -) -if err != nil { - log.Fatal(err) -} -``` - - ## Stats -### Measures +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. -Measures are used for recording data points with associated units. -Creating a Measure: +OpenCensus stats collection happens in two stages: -[embedmd]:# (stats.go measure) -```go -videoSize, err := stats.Int64("my.org/video_size", "processed video size", "MB") -if err != nil { - log.Fatal(err) -} -``` +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data -### Recording Measurements +### Recording -Measurements are data points associated with Measures. +Measurements are data points associated with a measure. Recording implicitly tags the set of Measurements with the tags from the provided context: @@ -135,7 +99,7 @@ stats.Record(ctx, videoSize.M(102478)) ### Views Views are how Measures are aggregated. You can think of them as queries over the -set of recorded data points (Measurements). +set of recorded data points (measurements). 
Views have two parts: the tags to group by and the aggregation type used. @@ -147,15 +111,13 @@ Currently four types of aggregations are supported: [embedmd]:# (stats.go aggs) ```go -distAgg := view.DistributionAggregation{0, 1 << 32, 2 << 32, 3 << 32} -countAgg := view.CountAggregation{} -sumAgg := view.SumAggregation{} -meanAgg := view.MeanAggregation{} +distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) +countAgg := view.Count() +sumAgg := view.Sum() +meanAgg := view.Mean() ``` -Here we create a view with the DistributionAggregation over our Measure. -All Measurements will be aggregated together irrespective of their tags, -i.e. no grouping by tag. +Here we create a view with the DistributionAggregation over our measure. [embedmd]:# (stats.go view) ```go @@ -163,7 +125,7 @@ if err = view.Subscribe(&view.View{ Name: "my.org/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, - Aggregation: view.DistributionAggregation([]float64{0, 1 << 32, 2 << 32, 3 << 32}), + Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to subscribe to view: %v", err) } @@ -172,48 +134,14 @@ if err = view.Subscribe(&view.View{ Subscribe begins collecting data for the view. Subscribed views' data will be exported via the registered exporters. -[embedmd]:# (stats.go registerExporter) -```go -// Register an exporter to be able to retrieve -// the data from the subscribed views. -view.RegisterExporter(&exporter{}) -``` - -An example logger exporter is below: - -[embedmd]:# (stats.go exporter) -```go - -type exporter struct{} - -func (e *exporter) ExportView(vd *view.Data) { - log.Println(vd) -} - -``` - -Configure the default interval between reports of collected data. -This is a system wide interval and impacts all views. The default -interval duration is 10 seconds. 
- -[embedmd]:# (stats.go reportingPeriod) -```go -view.SetReportingPeriod(5 * time.Second) -``` - - ## Traces -### Starting and ending a span - [embedmd]:# (trace.go startend) ```go ctx, span := trace.StartSpan(ctx, "your choice of name") defer span.End() ``` -More tracing examples are coming soon... - ## Profiles OpenCensus tags can be applied as profiler labels @@ -239,7 +167,6 @@ A screenshot of the CPU profile from the program above: ![CPU profile](https://i.imgur.com/jBKjlkw.png) - [travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master [travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go [appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true @@ -257,4 +184,4 @@ A screenshot of the CPU profile from the program above: [exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver [exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin [exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger -[exporter-xray]: https://godoc.org/go.opencensus.io/exporter/xray +[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws diff --git a/vendor/go.opencensus.io/examples/grpc/README.md b/vendor/go.opencensus.io/examples/grpc/README.md index 7d3a8d0cb..46af24f40 100644 --- a/vendor/go.opencensus.io/examples/grpc/README.md +++ b/vendor/go.opencensus.io/examples/grpc/README.md @@ -27,5 +27,5 @@ You will see traces and stats exported on the stdout. You can use one of the to upload collected data to the backend of your choice. 
You can also see the z-pages provided from the server: -* Traces: http://localhost:8081/tracez -* RPCs: http://localhost:8081/rpcz +* Traces: http://localhost:8081/debug/tracez +* RPCs: http://localhost:8081/debug/rpcz diff --git a/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go b/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go index 2745d26bf..fc6c48e6f 100644 --- a/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go +++ b/vendor/go.opencensus.io/examples/grpc/helloworld_server/main.go @@ -18,13 +18,16 @@ package main import ( "log" + "math/rand" "net" "net/http" + "time" "go.opencensus.io/examples/exporter" pb "go.opencensus.io/examples/grpc/proto" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" + "go.opencensus.io/trace" "go.opencensus.io/zpages" "golang.org/x/net/context" "google.golang.org/grpc" @@ -38,17 +41,23 @@ type server struct{} // SayHello implements helloworld.GreeterServer func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + ctx, span := trace.StartSpan(ctx, "sleep") + time.Sleep(time.Duration(rand.Float64() * float64(time.Second))) + span.End() return &pb.HelloReply{Message: "Hello " + in.Name}, nil } func main() { - go func() { log.Fatal(http.ListenAndServe(":8081", zpages.Handler)) }() + go func() { + http.Handle("/debug/", http.StripPrefix("/debug", zpages.Handler)) + log.Fatal(http.ListenAndServe(":8081", nil)) + }() // Register stats and trace exporters to export // the collected data. view.RegisterExporter(&exporter.PrintExporter{}) // Subscribe to collect server request count. 
- if err := ocgrpc.ServerRequestCountView.Subscribe(); err != nil { + if err := view.Subscribe(ocgrpc.DefaultServerViews...); err != nil { log.Fatal(err) } diff --git a/vendor/go.opencensus.io/examples/helloworld/main.go b/vendor/go.opencensus.io/examples/helloworld/main.go index ede25cc26..948872160 100644 --- a/vendor/go.opencensus.io/examples/helloworld/main.go +++ b/vendor/go.opencensus.io/examples/helloworld/main.go @@ -61,14 +61,13 @@ func main() { // Create view to see the processed video size // distribution broken down by frontend. // Subscribe will allow view data to be exported. - err = view.Subscribe(&view.View{ + if err := view.Subscribe(&view.View{ Name: "my.org/views/video_size", Description: "processed video size over time", TagKeys: []tag.Key{frontendKey}, Measure: videoSize, - Aggregation: view.DistributionAggregation{0, 1 << 16, 1 << 32}, - }) - if err != nil { + Aggregation: view.Distribution(0, 1<<16, 1<<32), + }); err != nil { log.Fatalf("Cannot subscribe to the view: %v", err) } diff --git a/vendor/go.opencensus.io/exporter/jaeger/agent.go b/vendor/go.opencensus.io/exporter/jaeger/agent.go new file mode 100644 index 000000000..362a571a0 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/jaeger/agent.go @@ -0,0 +1,89 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "fmt" + "io" + "net" + + "git.apache.org/thrift.git/lib/go/thrift" + gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" +) + +// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent +const udpPacketMaxLength = 65000 + +// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. +type agentClientUDP struct { + gen.Agent + io.Closer + + connUDP *net.UDPConn + client *gen.AgentClient + maxPacketSize int // max size of datagram in bytes + thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span +} + +// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. +func newAgentClientUDP(hostPort string, maxPacketSize int) (*agentClientUDP, error) { + if maxPacketSize == 0 { + maxPacketSize = udpPacketMaxLength + } + + thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) + protocolFactory := thrift.NewTCompactProtocolFactory() + client := gen.NewAgentClientFactory(thriftBuffer, protocolFactory) + + destAddr, err := net.ResolveUDPAddr("udp", hostPort) + if err != nil { + return nil, err + } + + connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) + if err != nil { + return nil, err + } + if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { + return nil, err + } + + clientUDP := &agentClientUDP{ + connUDP: connUDP, + client: client, + maxPacketSize: maxPacketSize, + thriftBuffer: thriftBuffer} + return clientUDP, nil +} + +// EmitBatch implements EmitBatch() of Agent interface +func (a *agentClientUDP) EmitBatch(batch *gen.Batch) error { + a.thriftBuffer.Reset() + a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages + if err := a.client.EmitBatch(batch); err != nil { + return err + } + if a.thriftBuffer.Len() > a.maxPacketSize { + return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d", + a.thriftBuffer.Len(), a.maxPacketSize, 
len(batch.Spans)) + } + _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) + return err +} + +// Close implements Close() of io.Closer and closes the underlying UDP connection. +func (a *agentClientUDP) Close() error { + return a.connUDP.Close() +} diff --git a/vendor/go.opencensus.io/examples/trace/jaeger/main.go b/vendor/go.opencensus.io/exporter/jaeger/example/main.go similarity index 100% rename from vendor/go.opencensus.io/examples/trace/jaeger/main.go rename to vendor/go.opencensus.io/exporter/jaeger/example/main.go diff --git a/vendor/go.opencensus.io/exporter/jaeger/example_test.go b/vendor/go.opencensus.io/exporter/jaeger/example_test.go index 8e4c4c5c8..7865d6ef5 100644 --- a/vendor/go.opencensus.io/exporter/jaeger/example_test.go +++ b/vendor/go.opencensus.io/exporter/jaeger/example_test.go @@ -21,7 +21,7 @@ import ( "go.opencensus.io/trace" ) -func Example() { +func ExampleNewExporter_collector() { // Register the Jaeger exporter to be able to retrieve // the collected spans. exporter, err := jaeger.NewExporter(jaeger.Options{ @@ -33,3 +33,16 @@ func Example() { } trace.RegisterExporter(exporter) } + +func ExampleNewExporter_agent() { + // Register the Jaeger exporter to be able to retrieve + // the collected spans. 
+ exporter, err := jaeger.NewExporter(jaeger.Options{ + AgentEndpoint: "localhost:6831", + ServiceName: "trace-demo", + }) + if err != nil { + log.Fatal(err) + } + trace.RegisterExporter(exporter) +} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/agent.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/agent.go new file mode 100644 index 000000000..e89bf4994 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/agent.go @@ -0,0 +1,244 @@ +// Autogenerated by Thrift Compiler (0.9.3) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package jaeger + +import ( + "bytes" + "context" + "fmt" + + "git.apache.org/thrift.git/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = bytes.Equal + +type Agent interface { + // Parameters: + // - Batch + EmitBatch(batch *Batch) (err error) +} + +type AgentClient struct { + Transport thrift.TTransport + ProtocolFactory thrift.TProtocolFactory + InputProtocol thrift.TProtocol + OutputProtocol thrift.TProtocol + SeqId int32 +} + +func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { + return &AgentClient{Transport: t, + ProtocolFactory: f, + InputProtocol: f.GetProtocol(t), + OutputProtocol: f.GetProtocol(t), + SeqId: 0, + } +} + +func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { + return &AgentClient{Transport: t, + ProtocolFactory: nil, + InputProtocol: iprot, + OutputProtocol: oprot, + SeqId: 0, + } +} + +// Parameters: +// - Batch +func (p *AgentClient) EmitBatch(batch *Batch) (err error) { + if err = p.sendEmitBatch(batch); err != nil { + return + } + return +} + +func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) { + oprot := p.OutputProtocol + if oprot == nil { + oprot = p.ProtocolFactory.GetProtocol(p.Transport) + p.OutputProtocol = 
oprot + } + p.SeqId++ + if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { + return + } + args := AgentEmitBatchArgs{ + Batch: batch, + } + if err = args.Write(oprot); err != nil { + return + } + if err = oprot.WriteMessageEnd(); err != nil { + return + } + return oprot.Flush(context.Background()) +} + +type AgentProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Agent +} + +func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewAgentProcessor(handler Agent) *AgentProcessor { + + self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} + self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} + return self0 +} + +func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + ctx := context.Background() + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { + return false, err + } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x1.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush(ctx) + return false, x1 +} + +type agentProcessorEmitBatch struct { + handler Agent +} + +func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := AgentEmitBatchArgs{} + if err = 
args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + return false, err + } + + iprot.ReadMessageEnd() + var err2 error + if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { + return true, err2 + } + return true, nil +} + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Batch +type AgentEmitBatchArgs struct { + Batch *Batch `thrift:"batch,1" json:"batch"` +} + +func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { + return &AgentEmitBatchArgs{} +} + +var AgentEmitBatchArgs_Batch_DEFAULT *Batch + +func (p *AgentEmitBatchArgs) GetBatch() *Batch { + if !p.IsSetBatch() { + return AgentEmitBatchArgs_Batch_DEFAULT + } + return p.Batch +} +func (p *AgentEmitBatchArgs) IsSetBatch() bool { + return p.Batch != nil +} + +func (p *AgentEmitBatchArgs) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { + p.Batch = &Batch{} + if err := p.Batch.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) + } + return nil +} + +func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", 
p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) + } + if err := p.Batch.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) + } + return err +} + +func (p *AgentEmitBatchArgs) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) +} diff --git a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/jaeger.go b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/jaeger.go index 64d6b714a..cb52d42ef 100644 --- a/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/jaeger.go +++ b/vendor/go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger/jaeger.go @@ -2143,7 +2143,7 @@ func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TP oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) x9.Write(oprot) oprot.WriteMessageEnd() - oprot.Flush() + oprot.Flush(ctx) return false, x9 } @@ -2160,7 +2160,7 @@ func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int oprot.WriteMessageBegin("submitBatches", thrift.EXCEPTION, seqId) x.Write(oprot) oprot.WriteMessageEnd() - oprot.Flush() + oprot.Flush(ctx) return false, err } @@ -2173,7 +2173,7 @@ func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int 
oprot.WriteMessageBegin("submitBatches", thrift.EXCEPTION, seqId) x.Write(oprot) oprot.WriteMessageEnd() - oprot.Flush() + oprot.Flush(ctx) return true, err2 } else { result.Success = retval @@ -2187,7 +2187,7 @@ func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { err = err2 } - if err2 = oprot.Flush(); err == nil && err2 != nil { + if err2 = oprot.Flush(ctx); err == nil && err2 != nil { err = err2 } if err != nil { diff --git a/vendor/go.opencensus.io/exporter/jaeger/jaeger.go b/vendor/go.opencensus.io/exporter/jaeger/jaeger.go index 43384a90a..8adafbb24 100644 --- a/vendor/go.opencensus.io/exporter/jaeger/jaeger.go +++ b/vendor/go.opencensus.io/exporter/jaeger/jaeger.go @@ -39,6 +39,10 @@ type Options struct { // For example, http://localhost:14268. Endpoint string + // AgentEndpoint instructs exporter to send spans to jaeger-agent at this address. + // For example, localhost:6831. + AgentEndpoint string + // OnError is the hook to be called when there is // an error occurred when uploading the stats data. // If no custom hook is set, errors are logged. @@ -61,10 +65,20 @@ type Options struct { // the collected spans to Jaeger. 
func NewExporter(o Options) (*Exporter, error) { endpoint := o.Endpoint - if endpoint == "" { + if endpoint == "" && o.AgentEndpoint == "" { return nil, errors.New("missing endpoint for Jaeger exporter") } - endpoint = endpoint + "/api/traces?format=jaeger.thrift" + + var client *agentClientUDP + var err error + if endpoint != "" { + endpoint = endpoint + "/api/traces?format=jaeger.thrift" + } else { + client, err = newAgentClientUDP(o.AgentEndpoint, udpPacketMaxLength) + if err != nil { + return nil, err + } + } onError := func(err error) { if o.OnError != nil { o.OnError(err) @@ -77,10 +91,12 @@ func NewExporter(o Options) (*Exporter, error) { service = defaultServiceName } e := &Exporter{ - endpoint: endpoint, - username: o.Username, - password: o.Password, - service: service, + endpoint: endpoint, + agentEndpoint: o.AgentEndpoint, + client: client, + username: o.Username, + password: o.Password, + service: service, } bundler := bundler.NewBundler((*gen.Span)(nil), func(bundle interface{}) { if err := e.upload(bundle.([]*gen.Span)); err != nil { @@ -93,35 +109,47 @@ func NewExporter(o Options) (*Exporter, error) { // Exporter is an implementation of trace.Exporter that uploads spans to Jaeger. type Exporter struct { - endpoint string - service string - bundler *bundler.Bundler + endpoint string + agentEndpoint string + service string + bundler *bundler.Bundler + client *agentClientUDP username, password string } var _ trace.Exporter = (*Exporter)(nil) -// TODO(jbd): Also implement propagation.HTTPFormat. - // ExportSpan exports a SpanData to Jaeger. func (e *Exporter) ExportSpan(data *trace.SpanData) { - var tags []*gen.Tag + e.bundler.Add(spanDataToThrift(data), 1) + // TODO(jbd): Handle oversized bundlers. 
+} + +func spanDataToThrift(data *trace.SpanData) *gen.Span { + tags := make([]*gen.Tag, 0, len(data.Attributes)) for k, v := range data.Attributes { tag := attributeToTag(k, v) if tag != nil { tags = append(tags, tag) } } + + tags = append(tags, + attributeToTag("status.code", data.Status.Code), + attributeToTag("status.message", data.Status.Message), + ) + var logs []*gen.Log for _, a := range data.Annotations { - var fields []*gen.Tag + fields := make([]*gen.Tag, 0, len(a.Attributes)) for k, v := range a.Attributes { tag := attributeToTag(k, v) if tag != nil { - fields = append(tags, tag) + fields = append(fields, tag) } } + fields = append(fields, attributeToTag("message", a.Message)) logs = append(logs, &gen.Log{ Timestamp: a.Time.UnixNano() / 1000, Fields: fields, @@ -135,12 +163,12 @@ func (e *Exporter) ExportSpan(data *trace.SpanData) { SpanId: bytesToInt64(link.SpanID[:]), }) } - span := &gen.Span{ + return &gen.Span{ TraceIdHigh: bytesToInt64(data.TraceID[0:8]), TraceIdLow: bytesToInt64(data.TraceID[8:16]), SpanId: bytesToInt64(data.SpanID[:]), ParentSpanId: bytesToInt64(data.ParentSpanID[:]), - OperationName: data.Name, + OperationName: name(data), Flags: int32(data.TraceOptions), StartTime: data.StartTime.UnixNano() / 1000, Duration: data.EndTime.Sub(data.StartTime).Nanoseconds() / 1000, @@ -148,8 +176,17 @@ func (e *Exporter) ExportSpan(data *trace.SpanData) { Logs: logs, References: refs, } - e.bundler.Add(span, 1) - // TODO(jbd): Handle oversized bundlers. +} + +func name(sd *trace.SpanData) string { + n := sd.Name + switch sd.SpanKind { + case trace.SpanKindClient: + n = "Sent." + n + case trace.SpanKindServer: + n = "Recv." 
+ n + } + return n } func attributeToTag(key string, a interface{}) *gen.Tag { @@ -159,16 +196,26 @@ func attributeToTag(key string, a interface{}) *gen.Tag { tag = &gen.Tag{ Key: key, VBool: &value, + VType: gen.TagType_BOOL, } case string: tag = &gen.Tag{ - Key: key, - VStr: &value, + Key: key, + VStr: &value, + VType: gen.TagType_STRING, } case int64: tag = &gen.Tag{ Key: key, VLong: &value, + VType: gen.TagType_LONG, + } + case int32: + v := int64(value) + tag = &gen.Tag{ + Key: key, + VLong: &v, + VType: gen.TagType_LONG, } } return tag @@ -188,6 +235,17 @@ func (e *Exporter) upload(spans []*gen.Span) error { ServiceName: e.service, }, } + if e.endpoint != "" { + return e.uploadCollector(batch) + } + return e.uploadAgent(batch) +} + +func (e *Exporter) uploadAgent(batch *gen.Batch) error { + return e.client.EmitBatch(batch) +} + +func (e *Exporter) uploadCollector(batch *gen.Batch) error { body, err := serialize(batch) if err != nil { return err diff --git a/vendor/go.opencensus.io/exporter/jaeger/jaeger_test.go b/vendor/go.opencensus.io/exporter/jaeger/jaeger_test.go index 395d29a34..977ff854c 100644 --- a/vendor/go.opencensus.io/exporter/jaeger/jaeger_test.go +++ b/vendor/go.opencensus.io/exporter/jaeger/jaeger_test.go @@ -16,7 +16,12 @@ package jaeger import ( "fmt" + "reflect" "testing" + "time" + + gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" + "go.opencensus.io/trace" ) // TODO(jbd): Test export. 
@@ -49,3 +54,82 @@ func Test_bytesToInt64(t *testing.T) { }) } } + +func Test_spanDataToThrift(t *testing.T) { + now := time.Now() + + answerValue := int64(42) + keyValue := "value" + resultValue := true + statusCodeValue := int64(2) + statusMessage := "error" + + tests := []struct { + name string + data *trace.SpanData + want *gen.Span + }{ + { + name: "no parent", + data: &trace.SpanData{ + SpanContext: trace.SpanContext{ + TraceID: trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, + SpanID: trace.SpanID{1, 2, 3, 4, 5, 6, 7, 8}, + }, + Name: "/foo", + StartTime: now, + EndTime: now, + Attributes: map[string]interface{}{ + "key": keyValue, + }, + Annotations: []trace.Annotation{ + { + Time: now, + Message: statusMessage, + Attributes: map[string]interface{}{ + "answer": answerValue, + }, + }, + { + Time: now, + Message: statusMessage, + Attributes: map[string]interface{}{ + "result": resultValue, + }, + }, + }, + Status: trace.Status{Code: 2, Message: "error"}, + }, + want: &gen.Span{ + TraceIdLow: 651345242494996240, + TraceIdHigh: 72623859790382856, + SpanId: 72623859790382856, + OperationName: "/foo", + StartTime: now.UnixNano() / 1000, + Duration: 0, + Tags: []*gen.Tag{ + {Key: "key", VType: gen.TagType_STRING, VStr: &keyValue}, + {Key: "status.code", VType: gen.TagType_LONG, VLong: &statusCodeValue}, + {Key: "status.message", VType: gen.TagType_STRING, VStr: &statusMessage}, + }, + Logs: []*gen.Log{ + {Timestamp: now.UnixNano() / 1000, Fields: []*gen.Tag{ + {Key: "answer", VType: gen.TagType_LONG, VLong: &answerValue}, + {Key: "message", VType: gen.TagType_STRING, VStr: &statusMessage}, + }}, + {Timestamp: now.UnixNano() / 1000, Fields: []*gen.Tag{ + {Key: "result", VType: gen.TagType_BOOL, VBool: &resultValue}, + {Key: "message", VType: gen.TagType_STRING, VStr: &statusMessage}, + }}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := spanDataToThrift(tt.data); !reflect.DeepEqual(got, 
tt.want) { + t.Errorf("spanDataToThrift() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/vendor/go.opencensus.io/examples/stats/prometheus/main.go b/vendor/go.opencensus.io/exporter/prometheus/example/main.go similarity index 94% rename from vendor/go.opencensus.io/examples/stats/prometheus/main.go rename to vendor/go.opencensus.io/exporter/prometheus/example/main.go index 1a08d564c..2968bfd00 100644 --- a/vendor/go.opencensus.io/examples/stats/prometheus/main.go +++ b/vendor/go.opencensus.io/exporter/prometheus/example/main.go @@ -52,21 +52,20 @@ func main() { // Create view to see the amount of video processed // Subscribe will allow view data to be exported. // Once no longer needed, you can unsubscribe from the view. - err = view.Subscribe( + if err = view.Subscribe( &view.View{ Name: "video_count", Description: "number of videos processed over time", Measure: videoCount, - Aggregation: &view.CountAggregation{}, + Aggregation: view.Count(), }, &view.View{ Name: "video_size", Description: "processed video size over time", Measure: videoSize, - Aggregation: view.DistributionAggregation{0, 1 << 16, 1 << 32}, + Aggregation: view.Distribution(0, 1<<16, 1<<32), }, - ) - if err != nil { + ); err != nil { log.Fatalf("Cannot subscribe to the view: %v", err) } diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go index ab616331a..4f5183d66 100644 --- a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go @@ -230,25 +230,21 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { - switch agg := v.Aggregation.(type) { - case view.CountAggregation: - data := row.Data.(*view.CountData) + switch data := row.Data.(type) { + case *view.CountData: return prometheus.NewConstMetric(desc, 
prometheus.CounterValue, float64(*data), tagValues(row.Tags)...) - case view.DistributionAggregation: - data := row.Data.(*view.DistributionData) + case *view.DistributionData: points := make(map[float64]uint64) - for i, b := range agg { + for i, b := range v.Aggregation.Buckets { points[b] = uint64(data.CountPerBucket[i]) } return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...) - case view.MeanAggregation: - data := row.Data.(*view.MeanData) + case *view.MeanData: return prometheus.NewConstSummary(desc, uint64(data.Count), data.Sum(), make(map[float64]float64), tagValues(row.Tags)...) - case view.SumAggregation: - data := row.Data.(*view.SumData) + case *view.SumData: return prometheus.NewConstMetric(desc, prometheus.UntypedValue, float64(*data), tagValues(row.Tags)...) default: diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go index b42b99503..456751fbd 100644 --- a/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus_test.go @@ -33,7 +33,7 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -func newView(measureName string, agg view.Aggregation) *view.View { +func newView(measureName string, agg *view.Aggregation) *view.View { m, err := stats.Int64(measureName, "bytes", stats.UnitBytes) if err != nil { log.Fatal(err) @@ -59,13 +59,13 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) { }{ 0: { vds: &view.Data{ - View: newView("TestOnlyCumulativeWindowSupported/m1", view.CountAggregation{}), + View: newView("TestOnlyCumulativeWindowSupported/m1", view.Count()), }, want: 0, // no rows present }, 1: { vds: &view.Data{ - View: newView("TestOnlyCumulativeWindowSupported/m2", view.CountAggregation{}), + View: newView("TestOnlyCumulativeWindowSupported/m2", view.Count()), Rows: []*view.Row{ {Data: &count1}, }, @@ -74,7 +74,7 @@ func 
TestOnlyCumulativeWindowSupported(t *testing.T) { }, 2: { vds: &view.Data{ - View: newView("TestOnlyCumulativeWindowSupported/m3", view.MeanAggregation{}), + View: newView("TestOnlyCumulativeWindowSupported/m3", view.Mean()), Rows: []*view.Row{ {Data: &mean1}, }, @@ -146,8 +146,8 @@ func TestCollectNonRacy(t *testing.T) { count1 := view.CountData(1) mean1 := &view.MeanData{Mean: 4.5, Count: 5} vds := []*view.Data{ - {View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.MeanAggregation{}), Rows: []*view.Row{{Data: mean1}}}, - {View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.CountAggregation{}), Rows: []*view.Row{{Data: &count1}}}, + {View: newView(fmt.Sprintf("TestCollectNonRacy/m1-%d", i), view.Mean()), Rows: []*view.Row{{Data: mean1}}}, + {View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: &count1}}}, } for _, v := range vds { exp.ExportView(v) @@ -207,7 +207,7 @@ type vCreator struct { err error } -func (vc *vCreator) createAndSubscribe(name, description string, keys []tag.Key, measure stats.Measure, agg view.Aggregation) { +func (vc *vCreator) createAndSubscribe(name, description string, keys []tag.Key, measure stats.Measure, agg *view.Aggregation) { vc.v, vc.err = view.New(name, description, keys, measure, agg) if err := vc.v.Subscribe(); err != nil { vc.err = err @@ -234,7 +234,7 @@ func TestMetricsEndpointOutput(t *testing.T) { vc := &vCreator{} for _, m := range measures { - vc.createAndSubscribe(m.Name(), m.Description(), nil, m, view.CountAggregation{}) + vc.createAndSubscribe(m.Name(), m.Description(), nil, m, view.Count()) } if vc.err != nil { t.Fatalf("failed to create views: %v", err) diff --git a/vendor/go.opencensus.io/exporter/stackdriver/example_test.go b/vendor/go.opencensus.io/exporter/stackdriver/example_test.go index 38be71b01..6298a32ad 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/example_test.go +++ 
b/vendor/go.opencensus.io/exporter/stackdriver/example_test.go @@ -33,8 +33,14 @@ func Example() { // Export to Stackdriver Monitoring. view.RegisterExporter(exporter) - // Subscribe views to see stats in Stackdriver Monitoring - view.Subscribe(ochttp.ClientLatencyView, ochttp.ClientResponseBytesView) + + // Subscribe views to see stats in Stackdriver Monitoring. + if err := view.Subscribe( + ochttp.ClientLatencyView, + ochttp.ClientResponseBytesView, + ); err != nil { + log.Fatal(err) + } // Export to Stackdriver Trace. trace.RegisterExporter(exporter) diff --git a/vendor/go.opencensus.io/examples/stats/stackdriver/main.go b/vendor/go.opencensus.io/exporter/stackdriver/examples/stats/main.go similarity index 95% rename from vendor/go.opencensus.io/examples/stats/stackdriver/main.go rename to vendor/go.opencensus.io/exporter/stackdriver/examples/stats/main.go index a82974c62..dab873e2a 100644 --- a/vendor/go.opencensus.io/examples/stats/stackdriver/main.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/examples/stats/main.go @@ -62,13 +62,12 @@ func main() { // Create view to see the processed video size cumulatively. // Subscribe will allow view data to be exported. // Once no longer need, you can unsubscribe from the view. 
- err = view.Subscribe(&view.View{ + if err := view.Subscribe(&view.View{ Name: "my.org/views/video_size_cum", Description: "processed video size over time", Measure: videoSize, - Aggregation: view.DistributionAggregation{0, 1 << 16, 1 << 32}, - }) - if err != nil { + Aggregation: view.Distribution(0, 1<<16, 1<<32), + }); err != nil { log.Fatalf("Cannot subscribe to the view: %v", err) } diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go index 2ae7dc333..0f33667a0 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver.go @@ -71,6 +71,10 @@ type Options struct { // with type global and no resource labels will be used. // Optional. Resource *monitoredrespb.MonitoredResource + + // MetricPrefix overrides the OpenCensus prefix of a stackdriver metric. + // Optional. + MetricPrefix string } // Exporter is a stats.Exporter and trace.Exporter diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go index 796cbb166..59b91303b 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stackdriver_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "go.opencensus.io/internal/testpb" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/stats/view" "go.opencensus.io/trace" @@ -81,3 +82,28 @@ func TestExport(t *testing.T) { exporter.Flush() exporter.Flush() } + +func TestGRPC(t *testing.T) { + projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") + if !ok { + t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") + } + + exporter, err := NewExporter(Options{ProjectID: projectID}) + if err != nil { + t.Fatal(err) + } + defer exporter.Flush() + + trace.RegisterExporter(exporter) + defer trace.UnregisterExporter(exporter) + view.RegisterExporter(exporter) + defer 
view.UnregisterExporter(exporter) + + trace.SetDefaultSampler(trace.AlwaysSample()) + + client, done := testpb.NewTestClient(t) + defer done() + + client.Single(context.Background(), &testpb.FooRequest{SleepNanos: int64(42 * time.Millisecond)}) +} diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stats.go b/vendor/go.opencensus.io/exporter/stackdriver/stats.go index 62112421b..f61bc8e4a 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stats.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stats.go @@ -22,7 +22,6 @@ import ( "net/url" "os" "path" - "reflect" "strconv" "strings" "sync" @@ -31,9 +30,10 @@ import ( "go.opencensus.io/internal" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.opencensus.io/trace" - monitoring "cloud.google.com/go/monitoring/apiv3" - timestamp "github.com/golang/protobuf/ptypes/timestamp" + "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/ptypes/timestamp" "google.golang.org/api/option" "google.golang.org/api/support/bundler" distributionpb "google.golang.org/genproto/googleapis/api/distribution" @@ -49,6 +49,7 @@ import ( const maxTimeSeriesPerUpload = 200 const opencensusTaskKey = "opencensus_task" const opencensusTaskDescription = "Opencensus task identifier" +const defaultDisplayNamePrefix = "OpenCensus" // statsExporter exports stats to the Stackdriver Monitoring. type statsExporter struct { @@ -143,7 +144,7 @@ func getTaskValue() string { // handleUpload handles uploading a slice // of Data, as well as error handling. 
func (e *statsExporter) handleUpload(vds ...*view.Data) { - if err := e.upload(vds); err != nil { + if err := e.uploadStats(vds); err != nil { e.onError(err) } } @@ -164,16 +165,24 @@ func (e *statsExporter) onError(err error) { log.Printf("Failed to export to Stackdriver Monitoring: %v", err) } -func (e *statsExporter) upload(vds []*view.Data) error { - ctx := context.Background() +func (e *statsExporter) uploadStats(vds []*view.Data) error { + span := trace.NewSpan( + "go.opencensus.io/exporter/stackdriver.uploadStats", + nil, + trace.StartOptions{Sampler: trace.NeverSample()}, + ) + ctx := trace.WithSpan(context.Background(), span) + defer span.End() for _, vd := range vds { if err := e.createMeasure(ctx, vd); err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) return err } } for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { if err := e.c.CreateTimeSeries(ctx, req); err != nil { + span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) // TODO(jbd): Don't fail fast here, batch errors? 
return err } @@ -255,25 +264,29 @@ func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error var metricKind metricpb.MetricDescriptor_MetricKind var valueType metricpb.MetricDescriptor_ValueType - switch agg.(type) { - case view.CountAggregation: + switch agg.Type { + case view.AggTypeCount: valueType = metricpb.MetricDescriptor_INT64 - case view.SumAggregation: + case view.AggTypeSum: valueType = metricpb.MetricDescriptor_DOUBLE - case view.MeanAggregation: + case view.AggTypeMean: valueType = metricpb.MetricDescriptor_DISTRIBUTION - case view.DistributionAggregation: + case view.AggTypeDistribution: valueType = metricpb.MetricDescriptor_DISTRIBUTION default: - return fmt.Errorf("unsupported aggregation type: %T", agg) + return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) } metricKind = metricpb.MetricDescriptor_CUMULATIVE + displayNamePrefix := defaultDisplayNamePrefix + if e.o.MetricPrefix != "" { + displayNamePrefix = e.o.MetricPrefix + } md, err = createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{ Name: monitoring.MetricProjectPath(e.o.ProjectID), MetricDescriptor: &metricpb.MetricDescriptor{ - DisplayName: path.Join("OpenCensus", viewName), + DisplayName: path.Join(displayNamePrefix, viewName), Description: m.Description(), Unit: m.Unit(), Type: namespacedViewName(viewName, false), @@ -333,7 +346,6 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { }, }} case *view.DistributionData: - bounds := vd.Aggregation.(view.DistributionAggregation) return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ DistributionValue: &distributionpb.Distribution{ Count: v.Count, @@ -347,7 +359,7 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { BucketOptions: &distributionpb.Distribution_BucketOptions{ Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ ExplicitBuckets: 
&distributionpb.Distribution_BucketOptions_Explicit{ - Bounds: []float64(bounds), + Bounds: vd.Aggregation.Buckets, }, }, }, @@ -392,23 +404,19 @@ func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor { return labelDescriptors } -func equalAggTagKeys(md *metricpb.MetricDescriptor, agg view.Aggregation, keys []tag.Key) error { - aggType := reflect.TypeOf(agg) - if aggType.Kind() == reflect.Ptr { // if pointer, find out the concrete type - aggType = reflect.ValueOf(agg).Elem().Type() - } +func equalAggTagKeys(md *metricpb.MetricDescriptor, agg *view.Aggregation, keys []tag.Key) error { var aggTypeMatch bool switch md.ValueType { case metricpb.MetricDescriptor_INT64: - aggTypeMatch = aggType == reflect.TypeOf(view.CountAggregation{}) + aggTypeMatch = agg.Type == view.AggTypeCount case metricpb.MetricDescriptor_DOUBLE: - aggTypeMatch = aggType == reflect.TypeOf(view.SumAggregation{}) + aggTypeMatch = agg.Type == view.AggTypeSum case metricpb.MetricDescriptor_DISTRIBUTION: - aggTypeMatch = aggType == reflect.TypeOf(view.MeanAggregation{}) || aggType == reflect.TypeOf(view.DistributionAggregation{}) + aggTypeMatch = agg.Type == view.AggTypeMean || agg.Type == view.AggTypeDistribution } if !aggTypeMatch { - return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", aggType) + return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type) } if len(md.Labels) != len(keys)+1 { diff --git a/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go b/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go index da6d0e74d..45942a5aa 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/stats_test.go @@ -88,13 +88,13 @@ func TestExporter_makeReq(t *testing.T) { Description: "desc", TagKeys: []tag.Key{key}, Measure: m, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } distView := &view.View{ Name: 
"distview", Description: "desc", Measure: m, - Aggregation: view.DistributionAggregation{2, 4, 7}, + Aggregation: view.Distribution(2, 4, 7), } start := time.Now() @@ -393,7 +393,7 @@ func TestExporter_makeReq_batching(t *testing.T) { Description: "desc", TagKeys: []tag.Key{key}, Measure: m, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } tests := []struct { @@ -457,7 +457,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { tests := []struct { name string md *metricpb.MetricDescriptor - agg view.Aggregation + agg *view.Aggregation keys []tag.Key wantErr bool }{ @@ -468,7 +468,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { ValueType: metricpb.MetricDescriptor_INT64, Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, }, - agg: view.CountAggregation{}, + agg: view.Count(), wantErr: false, }, { @@ -478,7 +478,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { ValueType: metricpb.MetricDescriptor_DOUBLE, Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, }, - agg: view.SumAggregation{}, + agg: view.Sum(), wantErr: false, }, { @@ -488,7 +488,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { ValueType: metricpb.MetricDescriptor_DISTRIBUTION, Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, }, - agg: view.MeanAggregation{}, + agg: view.Mean(), wantErr: false, }, { @@ -498,7 +498,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { ValueType: metricpb.MetricDescriptor_DISTRIBUTION, Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, }, - agg: view.CountAggregation{}, + agg: view.Count(), wantErr: true, }, { @@ -508,7 +508,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { ValueType: metricpb.MetricDescriptor_DOUBLE, Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, }, - agg: view.MeanAggregation{}, + agg: view.Mean(), wantErr: true, }, { @@ -522,7 +522,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { {Key: opencensusTaskKey}, }, }, - agg: view.DistributionAggregation{}, + agg: 
view.Distribution(), keys: []tag.Key{key1, key2}, wantErr: false, }, @@ -532,7 +532,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { MetricKind: metricpb.MetricDescriptor_CUMULATIVE, ValueType: metricpb.MetricDescriptor_DISTRIBUTION, }, - agg: view.DistributionAggregation{}, + agg: view.Distribution(), keys: []tag.Key{key1, key2}, wantErr: true, }, @@ -543,7 +543,7 @@ func TestEqualAggWindowTagKeys(t *testing.T) { ValueType: metricpb.MetricDescriptor_INT64, Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, }, - agg: &view.CountAggregation{}, + agg: view.Count(), wantErr: false, }, } @@ -581,7 +581,7 @@ func TestExporter_createMeasure(t *testing.T) { Description: "desc", TagKeys: []tag.Key{key}, Measure: m, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } data := view.CountData(0) @@ -643,7 +643,7 @@ func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) { Description: "desc", TagKeys: []tag.Key{key}, Measure: m, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } if err := view.Subscribe(v); err != nil { t.Fatal(err) diff --git a/vendor/go.opencensus.io/exporter/stackdriver/trace.go b/vendor/go.opencensus.io/exporter/stackdriver/trace.go index 8db648f6e..7cbad39cc 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/trace.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/trace.go @@ -117,7 +117,7 @@ func (e *traceExporter) uploadSpans(spans []*trace.SpanData) { // Create a never-sampled span to prevent traces associated with exporter. 
span := trace.NewSpan("go.opencensus.io/exporter/stackdriver.uploadSpans", nil, trace.StartOptions{Sampler: trace.NeverSample()}) defer span.End() - span.SetAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) + span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) ctx := trace.WithSpan(context.Background(), span) // TODO: add timeouts err := e.client.BatchWriteSpans(ctx, &req) diff --git a/vendor/go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/go.opencensus.io/exporter/stackdriver/trace_proto.go index 504601d42..ce9e38231 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/trace_proto.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/trace_proto.go @@ -52,10 +52,18 @@ func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span { traceIDString := s.SpanContext.TraceID.String() spanIDString := s.SpanContext.SpanID.String() + name := s.Name + switch s.SpanKind { + case trace.SpanKindClient: + name = "Sent." + name + case trace.SpanKindServer: + name = "Recv." 
+ name + } + sp := &tracepb.Span{ Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, SpanId: spanIDString, - DisplayName: trunc(s.Name, 128), + DisplayName: trunc(name, 128), StartTime: timestampProto(s.StartTime), EndTime: timestampProto(s.EndTime), SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, diff --git a/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go b/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go index e0dbc0ef3..b65551525 100644 --- a/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go +++ b/vendor/go.opencensus.io/exporter/stackdriver/trace_proto_test.go @@ -77,10 +77,10 @@ func TestExportTrace(t *testing.T) { span2.AddMessageSendEvent(0x123, 1024, 512) span2.Annotatef(nil, "in span%d", 2) span2.Annotate(nil, big.NewRat(2, 4).String()) - span2.SetAttributes( + span2.AddAttributes( trace.StringAttribute("key1", "value1"), trace.StringAttribute("key2", "value2")) - span2.SetAttributes(trace.Int64Attribute("key1", 100)) + span2.AddAttributes(trace.Int64Attribute("key1", 100)) span2.End() } { diff --git a/vendor/go.opencensus.io/exporter/zipkin/example/main.go b/vendor/go.opencensus.io/exporter/zipkin/example/main.go new file mode 100644 index 000000000..f82631a12 --- /dev/null +++ b/vendor/go.opencensus.io/exporter/zipkin/example/main.go @@ -0,0 +1,75 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "log" + "time" + + openzipkin "github.com/openzipkin/zipkin-go" + "github.com/openzipkin/zipkin-go/reporter/http" + "go.opencensus.io/exporter/zipkin" + "go.opencensus.io/trace" +) + +func main() { + // The localEndpoint stores the name and address of the local service + localEndpoint, err := openzipkin.NewEndpoint("example-server", "192.168.1.5:5454") + if err != nil { + log.Println(err) + } + + // The Zipkin reporter takes collected spans from the app and reports them to the backend + // http://localhost:9411/api/v2/spans is the default for the Zipkin Span v2 + reporter := http.NewReporter("http://localhost:9411/api/v2/spans") + defer reporter.Close() + + // The OpenCensus exporter wraps the Zipkin reporter + exporter := zipkin.NewExporter(reporter, localEndpoint) + trace.RegisterExporter(exporter) + + // For example purposes, sample every trace. + trace.SetDefaultSampler(trace.AlwaysSample()) + + ctx := context.Background() + foo(ctx) +} + +func foo(ctx context.Context) { + // Name the current span "/foo" + ctx, span := trace.StartSpan(ctx, "/foo") + defer span.End() + + // Foo calls bar and baz + bar(ctx) + baz(ctx) +} + +func bar(ctx context.Context) { + ctx, span := trace.StartSpan(ctx, "/bar") + defer span.End() + + // Do bar + time.Sleep(2 * time.Millisecond) +} + +func baz(ctx context.Context) { + ctx, span := trace.StartSpan(ctx, "/baz") + defer span.End() + + // Do baz + time.Sleep(4 * time.Millisecond) +} diff --git a/vendor/go.opencensus.io/exporter/zipkin/zipkin.go b/vendor/go.opencensus.io/exporter/zipkin/zipkin.go index 9d0c4214d..30d2fa438 100644 --- a/vendor/go.opencensus.io/exporter/zipkin/zipkin.go +++ b/vendor/go.opencensus.io/exporter/zipkin/zipkin.go @@ -18,7 +18,6 @@ package zipkin // import "go.opencensus.io/exporter/zipkin" import ( "encoding/binary" "strconv" - "strings" "github.com/openzipkin/zipkin-go/model" "github.com/openzipkin/zipkin-go/reporter" @@ -102,23 +101,12 @@ func convertSpanID(s 
trace.SpanID) model.ID { } func spanKind(s *trace.SpanData) model.Kind { - if s.HasRemoteParent { - return model.Server - } - if strings.HasPrefix(s.Name, "Sent.") { + switch s.SpanKind { + case trace.SpanKindClient: return model.Client - } - if strings.HasPrefix(s.Name, "Recv.") { + case trace.SpanKindServer: return model.Server } - if len(s.MessageEvents) > 0 { - switch s.MessageEvents[0].EventType { - case trace.MessageEventTypeSent: - return model.Client - case trace.MessageEventTypeRecv: - return model.Server - } - } return model.Undetermined } diff --git a/vendor/go.opencensus.io/exporter/zipkin/zipkin_test.go b/vendor/go.opencensus.io/exporter/zipkin/zipkin_test.go index a96f08e1e..24194e02c 100644 --- a/vendor/go.opencensus.io/exporter/zipkin/zipkin_test.go +++ b/vendor/go.opencensus.io/exporter/zipkin/zipkin_test.go @@ -50,6 +50,7 @@ func TestExport(t *testing.T) { TraceOptions: 1, }, Name: "name", + SpanKind: trace.SpanKindClient, StartTime: now, EndTime: now.Add(24 * time.Hour), Attributes: map[string]interface{}{ diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index f1d0efd1c..8907e4e3c 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -18,7 +18,7 @@ import "time" // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -const UserAgent = "opencensus-go-v0.1.0" +const UserAgent = "opencensus-go-v0.4.0" // MonotonicEndTime returns the end time at present // but offset from start, monotonically. 
diff --git a/vendor/go.opencensus.io/internal/readme/source.md b/vendor/go.opencensus.io/internal/readme/source.md index 791086c04..31aaa4c97 100644 --- a/vendor/go.opencensus.io/internal/readme/source.md +++ b/vendor/go.opencensus.io/internal/readme/source.md @@ -34,20 +34,29 @@ Currently, OpenCensus supports: * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +Start with instrumenting HTTP and gRPC clients and servers, +then add additional custom instrumentation if needed. + +* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http) +* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc) + + ## Tags Tags represent propagated key-value pairs. They are propagated using context.Context in the same process or can be encoded to be transmitted on the wire and decoded back to a tag.Map at the destination. -### Getting a key by a name - -A key is defined by its name. To use a key, a user needs to know its name and type. - -[embedmd]:# (tags.go stringKey) - -### Creating tags and propagating them - Package tag provides a builder to create tag maps and put it into the current context. To propagate a tag map to downstream methods and RPCs, New @@ -56,31 +65,20 @@ If there is already a tag map in the current context, it will be replaced. [embedmd]:# (tags.go new) -### Propagating a tag map in a context - -If you have access to a tag.Map, you can also -propagate it in the current context: - -[embedmd]:# (tags.go newContext) - -In order to update existing tags from the current context, -use New and pass the returned context. 
- -[embedmd]:# (tags.go replaceTagMap) - - ## Stats -### Measures +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. -Measures are used for recording data points with associated units. -Creating a Measure: +OpenCensus stats collection happens in two stages: -[embedmd]:# (stats.go measure) +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data -### Recording Measurements +### Recording -Measurements are data points associated with Measures. +Measurements are data points associated with a measure. Recording implicitly tags the set of Measurements with the tags from the provided context: @@ -89,7 +87,7 @@ provided context: ### Views Views are how Measures are aggregated. You can think of them as queries over the -set of recorded data points (Measurements). +set of recorded data points (measurements). Views have two parts: the tags to group by and the aggregation type used. @@ -101,36 +99,17 @@ Currently four types of aggregations are supported: [embedmd]:# (stats.go aggs) -Here we create a view with the DistributionAggregation over our Measure. -All Measurements will be aggregated together irrespective of their tags, -i.e. no grouping by tag. +Here we create a view with the DistributionAggregation over our measure. [embedmd]:# (stats.go view) Subscribe begins collecting data for the view. Subscribed views' data will be exported via the registered exporters. -[embedmd]:# (stats.go registerExporter) - -An example logger exporter is below: - -[embedmd]:# (stats.go exporter) - -Configure the default interval between reports of collected data. -This is a system wide interval and impacts all views. The default -interval duration is 10 seconds. 
- -[embedmd]:# (stats.go reportingPeriod) - - ## Traces -### Starting and ending a span - [embedmd]:# (trace.go startend) -More tracing examples are coming soon... - ## Profiles OpenCensus tags can be applied as profiler labels @@ -142,7 +121,6 @@ A screenshot of the CPU profile from the program above: ![CPU profile](https://i.imgur.com/jBKjlkw.png) - [travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master [travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go [appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true @@ -160,4 +138,4 @@ A screenshot of the CPU profile from the program above: [exporter-stackdriver]: https://godoc.org/go.opencensus.io/exporter/stackdriver [exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin [exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger -[exporter-xray]: https://godoc.org/go.opencensus.io/exporter/xray +[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws diff --git a/vendor/go.opencensus.io/internal/readme/stats.go b/vendor/go.opencensus.io/internal/readme/stats.go index 494968d88..70913d9af 100644 --- a/vendor/go.opencensus.io/internal/readme/stats.go +++ b/vendor/go.opencensus.io/internal/readme/stats.go @@ -18,7 +18,6 @@ package readme import ( "context" "log" - "time" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -30,28 +29,21 @@ import ( func statsExamples() { ctx := context.Background() - // START measure videoSize, err := stats.Int64("my.org/video_size", "processed video size", "MB") if err != nil { log.Fatal(err) } - // END measure - _ = videoSize - // START findMeasure m := stats.FindMeasure("my.org/video_size") if m == nil { log.Fatalln("measure not found") } - // END findMeasure - - _ = m // START aggs - distAgg := view.DistributionAggregation{0, 1 << 32, 2 << 32, 3 << 32} - countAgg := view.CountAggregation{} - sumAgg := view.SumAggregation{} - meanAgg 
:= view.MeanAggregation{} + distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) + countAgg := view.Count() + sumAgg := view.Sum() + meanAgg := view.Mean() // END aggs _, _, _, _ = distAgg, countAgg, sumAgg, meanAgg @@ -61,33 +53,13 @@ func statsExamples() { Name: "my.org/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, - Aggregation: view.DistributionAggregation([]float64{0, 1 << 32, 2 << 32, 3 << 32}), + Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to subscribe to view: %v", err) } // END view - // START reportingPeriod - view.SetReportingPeriod(5 * time.Second) - // END reportingPeriod - // START record stats.Record(ctx, videoSize.M(102478)) // END record - - // START registerExporter - // Register an exporter to be able to retrieve - // the data from the subscribed views. - view.RegisterExporter(&exporter{}) - // END registerExporter } - -// START exporter - -type exporter struct{} - -func (e *exporter) ExportView(vd *view.Data) { - log.Println(vd) -} - -// END exporter diff --git a/vendor/go.opencensus.io/internal/readme/tags.go b/vendor/go.opencensus.io/internal/readme/tags.go index 47d183623..579e3e7e2 100644 --- a/vendor/go.opencensus.io/internal/readme/tags.go +++ b/vendor/go.opencensus.io/internal/readme/tags.go @@ -24,16 +24,6 @@ import ( func tagsExamples() { ctx := context.Background() - // START stringKey - // Get a key to represent user OS. 
- key, err := tag.NewKey("my.org/keys/user-os") - if err != nil { - log.Fatal(err) - } - // END stringKey - _ = key - - // START new osKey, err := tag.NewKey("my.org/keys/user-os") if err != nil { log.Fatal(err) @@ -43,6 +33,7 @@ func tagsExamples() { log.Fatal(err) } + // START new ctx, err = tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), @@ -52,22 +43,6 @@ func tagsExamples() { } // END new - // START newContext - m := tag.FromContext(ctx) - // END newContext - - _ = m - - // START replaceTagMap - ctx, err = tag.New(ctx, - tag.Insert(osKey, "macOS-10.12.5"), - tag.Upsert(userIDKey, "fff0989878"), - ) - if err != nil { - log.Fatal(err) - } - // END replaceTagMap - // START profiler ctx, err = tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/generate.sh b/vendor/go.opencensus.io/internal/testpb/generate.sh similarity index 100% rename from vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/generate.sh rename to vendor/go.opencensus.io/internal/testpb/generate.sh diff --git a/vendor/go.opencensus.io/internal/testpb/impl.go b/vendor/go.opencensus.io/internal/testpb/impl.go new file mode 100644 index 000000000..24533afcd --- /dev/null +++ b/vendor/go.opencensus.io/internal/testpb/impl.go @@ -0,0 +1,93 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testpb + +import ( + "context" + "fmt" + "io" + "net" + "testing" + "time" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/trace" + "google.golang.org/grpc" +) + +type testServer struct{} + +var _ FooServer = (*testServer)(nil) + +func (s *testServer) Single(ctx context.Context, in *FooRequest) (*FooResponse, error) { + if in.SleepNanos > 0 { + _, span := trace.StartSpan(ctx, "testpb.Single.Sleep") + span.AddAttributes(trace.Int64Attribute("sleep_nanos", in.SleepNanos)) + time.Sleep(time.Duration(in.SleepNanos)) + span.End() + } + if in.Fail { + return nil, fmt.Errorf("request failed") + } + return &FooResponse{}, nil +} + +func (s *testServer) Multiple(stream Foo_MultipleServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + if in.Fail { + return fmt.Errorf("request failed") + } + if err := stream.Send(&FooResponse{}); err != nil { + return err + } + } +} + +func NewTestClient(l *testing.T) (client FooClient, cleanup func()) { + // initialize server + listener, err := net.Listen("tcp", "localhost:0") + if err != nil { + l.Fatal(err) + } + server := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})) + RegisterFooServer(server, &testServer{}) + go server.Serve(listener) + + // Initialize client. 
+ clientConn, err := grpc.Dial( + listener.Addr().String(), + grpc.WithInsecure(), + grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), + grpc.WithBlock()) + + if err != nil { + l.Fatal(err) + } + client = NewFooClient(clientConn) + + cleanup = func() { + server.GracefulStop() + clientConn.Close() + } + + return client, cleanup +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.pb.go b/vendor/go.opencensus.io/internal/testpb/test.pb.go similarity index 84% rename from vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.pb.go rename to vendor/go.opencensus.io/internal/testpb/test.pb.go index 76346fc1e..b8e2bbc95 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.pb.go +++ b/vendor/go.opencensus.io/internal/testpb/test.pb.go @@ -34,7 +34,8 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type FooRequest struct { - Fail bool `protobuf:"varint,1,opt,name=fail" json:"fail,omitempty"` + Fail bool `protobuf:"varint,1,opt,name=fail" json:"fail,omitempty"` + SleepNanos int64 `protobuf:"varint,2,opt,name=sleep_nanos,json=sleepNanos" json:"sleep_nanos,omitempty"` } func (m *FooRequest) Reset() { *m = FooRequest{} } @@ -49,6 +50,13 @@ func (m *FooRequest) GetFail() bool { return false } +func (m *FooRequest) GetSleepNanos() int64 { + if m != nil { + return m.SleepNanos + } + return 0 +} + type FooResponse struct { } @@ -203,16 +211,18 @@ var _Foo_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 137 bytes of a gzipped FileDescriptorProto + // 165 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, - 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0x94, 0x14, 0xb8, 0xb8, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0x94, 0x1c, 0xb9, 0xb8, 0xdc, 0xf2, 0xf3, 0x83, 0x52, 
0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x84, 0xb8, 0x58, 0xd2, 0x12, - 0x33, 0x73, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0xc0, 0x6c, 0x25, 0x5e, 0x2e, 0x6e, 0xb0, - 0x8a, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa3, 0x42, 0x2e, 0x66, 0xb7, 0xfc, 0x7c, 0x21, 0x43, - 0x2e, 0xb6, 0xe0, 0xcc, 0xbc, 0xf4, 0x9c, 0x54, 0x21, 0x21, 0x3d, 0x88, 0x51, 0x7a, 0x08, 0x73, - 0xa4, 0x84, 0x51, 0xc4, 0x20, 0x3a, 0x85, 0xcc, 0xb9, 0x38, 0x7c, 0x4b, 0x73, 0x4a, 0x32, 0x0b, - 0x48, 0xd0, 0xa4, 0xc1, 0x68, 0xc0, 0x98, 0xc4, 0x06, 0x76, 0xb2, 0x31, 0x20, 0x00, 0x00, 0xff, - 0xff, 0xda, 0xc5, 0x9f, 0x2f, 0xc0, 0x00, 0x00, 0x00, + 0x33, 0x73, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0xc0, 0x6c, 0x21, 0x79, 0x2e, 0xee, 0xe2, + 0x9c, 0xd4, 0xd4, 0x82, 0xf8, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xe6, + 0x20, 0x2e, 0xb0, 0x90, 0x1f, 0x48, 0x44, 0x89, 0x97, 0x8b, 0x1b, 0x6c, 0x44, 0x71, 0x41, 0x7e, + 0x5e, 0x71, 0xaa, 0x51, 0x21, 0x17, 0xb3, 0x5b, 0x7e, 0xbe, 0x90, 0x21, 0x17, 0x5b, 0x70, 0x66, + 0x5e, 0x7a, 0x4e, 0xaa, 0x90, 0x90, 0x1e, 0xc4, 0x2e, 0x3d, 0x84, 0x45, 0x52, 0xc2, 0x28, 0x62, + 0x10, 0x9d, 0x42, 0xe6, 0x5c, 0x1c, 0xbe, 0xa5, 0x39, 0x25, 0x99, 0x05, 0x24, 0x68, 0xd2, 0x60, + 0x34, 0x60, 0x4c, 0x62, 0x03, 0xfb, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x37, 0xb1, 0x2d, + 0x6e, 0xe1, 0x00, 0x00, 0x00, } //go:generate ./generate.sh diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.proto b/vendor/go.opencensus.io/internal/testpb/test.proto similarity index 90% rename from vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.proto rename to vendor/go.opencensus.io/internal/testpb/test.proto index 2a198a6f5..b82d128ac 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/internal/testpb/test.proto +++ b/vendor/go.opencensus.io/internal/testpb/test.proto @@ -4,6 +4,7 @@ package testpb; message FooRequest { bool fail = 1; + int64 sleep_nanos = 2; } message FooResponse { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go 
b/vendor/go.opencensus.io/plugin/ocgrpc/client.go index a3d7fcd7b..f4a2d4d27 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client.go @@ -15,6 +15,7 @@ package ocgrpc import ( + "go.opencensus.io/trace" "golang.org/x/net/context" "google.golang.org/grpc/stats" @@ -23,39 +24,32 @@ import ( // ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and // traces. Use with gRPC clients only. type ClientHandler struct { - // NoTrace may be set to disable recording OpenCensus Spans around - // gRPC methods. - NoTrace bool - - // NoStats may be set to disable recording OpenCensus Stats around each - // gRPC method. - NoStats bool + // StartOptions allows configuring the StartOptions used to create new spans. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this handler. + StartOptions trace.StartOptions } func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } +// TagConn exists to satisfy gRPC stats.Handler. func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { // no-op return ctx } +// HandleRPC implements per-RPC tracing and stats instrumentation. func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - if !c.NoTrace { - c.traceHandleRPC(ctx, rs) - } - if !c.NoStats { - c.statsHandleRPC(ctx, rs) - } + traceHandleRPC(ctx, rs) + c.statsHandleRPC(ctx, rs) } +// TagRPC implements per-RPC context management. 
func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - if !c.NoTrace { - ctx = c.traceTagRPC(ctx, rti) - } - if !c.NoStats { - ctx = c.statsTagRPC(ctx, rti) - } + ctx = c.traceTagRPC(ctx, rti) + ctx = c.statsTagRPC(ctx, rti) return ctx } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index 60edcc012..e9a4d2bdf 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -43,7 +43,7 @@ var ( Description: "RPC Errors", TagKeys: []tag.Key{KeyStatus, KeyMethod}, Measure: ClientErrorCount, - Aggregation: view.MeanAggregation{}, + Aggregation: view.Mean(), } ClientRoundTripLatencyView = &view.View{ @@ -87,17 +87,15 @@ var ( } ) -// All the default client views provided by this package: -var ( - DefaultClientViews = []*view.View{ - ClientErrorCountView, - ClientRoundTripLatencyView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientRequestCountView, - ClientResponseCountView, - } -) +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientErrorCountView, + ClientRoundTripLatencyView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientRequestCountView, + ClientResponseCountView, +} // TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. 
// TODO(acetechnologist): This is temporary and will need to be replaced by a diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go index 282be0627..5a0489eae 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics_test.go @@ -30,7 +30,7 @@ func TestViewsAggregationsConform(t *testing.T) { // https://github.com/census-instrumentation/opencensus-java/blob/2b464864e3dd3f80e8e4c9dc72fccc225444a939/contrib/grpc_metrics/src/main/java/io/opencensus/contrib/grpc/metrics/RpcViewConstants.java#L113-L658 // Add any other defined views to be type checked during tests to ensure we don't regress. - assertTypeOf := func(v *view.View, wantSample view.Aggregation) { + assertTypeOf := func(v *view.View, wantSample *view.Aggregation) { aggregation := v.Aggregation gotValue := reflect.ValueOf(aggregation) wantValue := reflect.ValueOf(wantSample) @@ -40,12 +40,12 @@ func TestViewsAggregationsConform(t *testing.T) { } } - assertTypeOf(ClientErrorCountView, view.MeanAggregation{}) - assertTypeOf(ClientRoundTripLatencyView, view.DistributionAggregation{}) - assertTypeOf(ClientRequestBytesView, view.DistributionAggregation{}) - assertTypeOf(ClientResponseBytesView, view.DistributionAggregation{}) - assertTypeOf(ClientRequestCountView, view.DistributionAggregation{}) - assertTypeOf(ClientResponseCountView, view.DistributionAggregation{}) + assertTypeOf(ClientErrorCountView, view.Mean()) + assertTypeOf(ClientRoundTripLatencyView, view.Distribution()) + assertTypeOf(ClientRequestBytesView, view.Distribution()) + assertTypeOf(ClientResponseBytesView, view.Distribution()) + assertTypeOf(ClientRequestCountView, view.Distribution()) + assertTypeOf(ClientResponseCountView, view.Distribution()) } func TestStrictViewNames(t *testing.T) { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go 
b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go index 59d963478..22d076dc7 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc/status" ) -// TagRPC gets the tag.Map populated by the application code, serializes +// statsTagRPC gets the tag.Map populated by the application code, serializes // its tags into the GRPC metadata in order to be sent to the server. func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { startTime := time.Now() @@ -38,23 +38,23 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) return ctx } - d := &rpcData{startTime: startTime} + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } ts := tag.FromContext(ctx) if ts != nil { encoded := tag.Encode(ts) ctx = stats.SetTags(ctx, encoded) } - ctx, _ = tag.New(ctx, - tag.Upsert(KeyMethod, methodName(info.FullMethodName)), - ) + // TODO(acetechnologist): should we be recording this later? What is the // point of updating d.reqLen & d.reqCount if we update now? - ocstats.Record(ctx, ClientStartedCount.M(1)) - + record(ctx, d, "", ClientStartedCount.M(1)) return context.WithValue(ctx, grpcClientRPCKey, d) } -// HandleRPC processes the RPC events. +// statsHandleRPC processes the RPC events. 
func (h *ClientHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: @@ -79,7 +79,7 @@ func (h *ClientHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPay return } - ocstats.Record(ctx, ClientRequestBytes.M(int64(s.Length))) + record(ctx, d, "", ClientRequestBytes.M(int64(s.Length))) atomic.AddInt64(&d.reqCount, 1) } @@ -87,12 +87,12 @@ func (h *ClientHandler) handleRPCInPayload(ctx context.Context, s *stats.InPaylo d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) if !ok { if grpclog.V(2) { - grpclog.Infoln("clientHandler.handleRPCInPayload failed to retrieve *rpcData from context") + grpclog.Infoln("failed to retrieve *rpcData from context") } return } - ocstats.Record(ctx, ClientResponseBytes.M(int64(s.Length))) + record(ctx, d, "", ClientResponseBytes.M(int64(s.Length))) atomic.AddInt64(&d.respCount, 1) } @@ -100,7 +100,7 @@ func (h *ClientHandler) handleRPCEnd(ctx context.Context, s *stats.End) { d, ok := ctx.Value(grpcClientRPCKey).(*rpcData) if !ok { if grpclog.V(2) { - grpclog.Infoln("clientHandler.handleRPCEnd failed to retrieve *rpcData from context") + grpclog.Infoln("failed to retrieve *rpcData from context") } return } @@ -116,15 +116,13 @@ func (h *ClientHandler) handleRPCEnd(ctx context.Context, s *stats.End) { ClientRoundTripLatency.M(float64(elapsedTime) / float64(time.Millisecond)), } + var st string if s.Error != nil { s, ok := status.FromError(s.Error) if ok { - ctx, _ = tag.New(ctx, - tag.Upsert(KeyStatus, s.Code().String()), - ) + st = s.Code().String() } m = append(m, ClientErrorCount.M(1)) } - - ocstats.Record(ctx, m...) + record(ctx, d, st, m...) 
} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go index d34a8a5cb..90da4e5b1 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler_test.go @@ -18,6 +18,7 @@ package ocgrpc import ( "testing" + "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -298,12 +299,12 @@ func TestClientDefaultCollections(t *testing.T) { for _, tc := range tcs { // Register views. - err := view.Subscribe(DefaultClientViews...) - if err != nil { + if err := view.Subscribe(DefaultClientViews...); err != nil { t.Error(err) } - h := &ClientHandler{NoTrace: true} + h := &ClientHandler{} + h.StartOptions.Sampler = trace.NeverSample() for _, rpc := range tc.rpcs { mods := []tag.Mutator{} for _, t := range rpc.tags { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go index 7b9c1c805..1370323fb 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go @@ -14,4 +14,6 @@ // Package ocgrpc contains OpenCensus stats and trace // integrations for gRPC. +// +// Use ServerHandler for servers and ClientHandler for clients. package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go index 7c8c861a4..cb9f9a6d7 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/example_test.go @@ -24,8 +24,7 @@ import ( func ExampleClientHandler() { // Subscribe views to collect data. - err := view.Subscribe(ocgrpc.DefaultClientViews...) 
- if err != nil { + if err := view.Subscribe(ocgrpc.DefaultClientViews...); err != nil { log.Fatal(err) } @@ -40,8 +39,7 @@ func ExampleClientHandler() { func ExampleServerHandler() { // Subscribe to views to collect data. - err := view.Subscribe(ocgrpc.DefaultServerViews...) - if err != nil { + if err := view.Subscribe(ocgrpc.DefaultServerViews...); err != nil { log.Fatal(err) } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go index 81b020e6a..dbbfd7f16 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/grpc_test.go @@ -20,6 +20,7 @@ import ( "go.opencensus.io/stats/view" "golang.org/x/net/context" + "google.golang.org/grpc/metadata" "go.opencensus.io/trace" @@ -70,45 +71,64 @@ func TestClientHandler(t *testing.T) { } func TestServerHandler(t *testing.T) { - ctx := context.Background() - te := &traceExporter{} - trace.RegisterExporter(te) - if err := ServerRequestCountView.Subscribe(); err != nil { - t.Fatal(err) + tests := []struct { + name string + newTrace bool + expectTraces int + }{ + {"trust_metadata", false, 1}, + {"no_trust_metadata", true, 0}, } - // Ensure we start tracing. 
- span := trace.NewSpan("/foo", nil, trace.StartOptions{ - Sampler: trace.AlwaysSample(), - }) - ctx = trace.WithSpan(ctx, span) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { - handler := &ServerHandler{} - ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{ - FullMethodName: "/service.foo/method", - }) - handler.HandleRPC(ctx, &stats.Begin{ - BeginTime: time.Now(), - }) - handler.HandleRPC(ctx, &stats.End{ - EndTime: time.Now(), - }) + ctx := context.Background() - stats, err := view.RetrieveData(ServerRequestCountView.Name) - if err != nil { - t.Fatal(err) + handler := &ServerHandler{ + IsPublicEndpoint: test.newTrace, + StartOptions: trace.StartOptions{ + Sampler: trace.ProbabilitySampler(0.0), + }, + } + + te := &traceExporter{} + trace.RegisterExporter(te) + if err := ServerRequestCountView.Subscribe(); err != nil { + t.Fatal(err) + } + + md := metadata.MD{ + "grpc-trace-bin": []string{string([]byte{0, 0, 62, 116, 14, 118, 117, 157, 126, 7, 114, 152, 102, 125, 235, 34, 114, 238, 1, 187, 201, 24, 210, 231, 20, 175, 241, 2, 1})}, + } + ctx = metadata.NewIncomingContext(ctx, md) + ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{ + FullMethodName: "/service.foo/method", + }) + handler.HandleRPC(ctx, &stats.Begin{ + BeginTime: time.Now(), + }) + handler.HandleRPC(ctx, &stats.End{ + EndTime: time.Now(), + }) + + rows, err := view.RetrieveData(ServerRequestCountView.Name) + if err != nil { + t.Fatal(err) + } + traces := te.buffer + + if got, want := len(rows), 1; got != want { + t.Errorf("Got %v rows; want %v", got, want) + } + if got, want := len(traces), test.expectTraces; got != want { + t.Errorf("Got %v traces; want %v", got, want) + } + + // Cleanup. + view.Unsubscribe(ServerRequestCountView) + }) } - traces := te.buffer - - if got, want := len(stats), 1; got != want { - t.Errorf("Got %v stats; want %v", got, want) - } - if got, want := len(traces), 1; got != want { - t.Errorf("Got %v traces; want %v", got, want) - } - - // Cleanup. 
- view.Unsubscribe(ServerRequestCountView) } type traceExporter struct { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go index b0343ff8b..f5da89b90 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server.go @@ -15,6 +15,7 @@ package ocgrpc import ( + "go.opencensus.io/trace" "golang.org/x/net/context" "google.golang.org/grpc/stats" @@ -22,40 +23,58 @@ import ( // ServerHandler implements gRPC stats.Handler recording OpenCensus stats and // traces. Use with gRPC servers. +// +// When installed (see Example), tracing metadata is read from inbound RPCs +// by default. If no tracing metadata is present, or if the tracing metadata is +// present but the SpanContext isn't sampled, then a new trace may be started +// (as determined by Sampler). type ServerHandler struct { - // NoTrace may be set to disable recording OpenCensus Spans around - // gRPC methods. - NoTrace bool + // IsPublicEndpoint may be set to true to always start a new trace around + // each RPC. Any SpanContext in the RPC metadata will be added as a linked + // span instead of making it the parent of the span created around the + // server RPC. + // + // Be aware that if you leave this false (the default) on a public-facing + // server, callers will be able to send tracing metadata in gRPC headers + // and trigger traces in your backend. + IsPublicEndpoint bool - // NoStats may be set to disable recording OpenCensus Stats around each - // gRPC method. - NoStats bool + // StartOptions to use for to spans started around RPCs handled by this server. + // + // These will apply even if there is tracing metadata already + // present on the inbound RPC but the SpanContext is not sampled. This + // ensures that each service has some opportunity to be traced. 
If you would + // like to not add any additional traces for this gRPC service, set: + // + // StartOptions.Sampler = trace.ProbabilitySampler(0.0) + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this handler. + StartOptions trace.StartOptions } +var _ stats.Handler = (*ServerHandler)(nil) + +// HandleConn exists to satisfy gRPC stats.Handler. func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } +// TagConn exists to satisfy gRPC stats.Handler. func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { // no-op return ctx } +// HandleRPC implements per-RPC tracing and stats instrumentation. func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - if !s.NoTrace { - s.traceHandleRPC(ctx, rs) - } - if !s.NoStats { - s.statsHandleRPC(ctx, rs) - } + traceHandleRPC(ctx, rs) + s.statsHandleRPC(ctx, rs) } +// TagRPC implements per-RPC context management. 
func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - if !s.NoTrace { - ctx = s.traceTagRPC(ctx, rti) - } - if !s.NoStats { - ctx = s.statsTagRPC(ctx, rti) - } + ctx = s.traceTagRPC(ctx, rti) + ctx = s.statsTagRPC(ctx, rti) return ctx } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index 3693e406c..7ff4d24a0 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -47,7 +47,7 @@ var ( Description: "RPC Errors", TagKeys: []tag.Key{KeyMethod, KeyStatus}, Measure: ServerErrorCount, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } ServerServerElapsedTimeView = &view.View{ @@ -91,16 +91,14 @@ var ( } ) -// All default server views provided by this package: -var ( - DefaultServerViews = []*view.View{ - ServerErrorCountView, - ServerServerElapsedTimeView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerRequestCountView, - ServerResponseCountView, - } -) +// DefaultServerViews are the default server views provided by this package. +var DefaultServerViews = []*view.View{ + ServerErrorCountView, + ServerServerElapsedTimeView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerRequestCountView, + ServerResponseCountView, +} // TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. 
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go index 29f1ef40b..c146da5dd 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go @@ -29,7 +29,7 @@ import ( "google.golang.org/grpc/status" ) -// TagRPC gets the metadata from gRPC context, extracts the encoded tags from +// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from // it and creates a new tag.Map and puts them into the returned context. func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { startTime := time.Now() @@ -39,13 +39,16 @@ func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) } return ctx } - d := &rpcData{startTime: startTime} - ctx, _ = h.createTags(ctx, info.FullMethodName) - ocstats.Record(ctx, ServerStartedCount.M(1)) + d := &rpcData{ + startTime: startTime, + method: info.FullMethodName, + } + ctx, _ = h.createTags(ctx) + record(ctx, d, "", ServerStartedCount.M(1)) return context.WithValue(ctx, grpcServerRPCKey, d) } -// HandleRPC processes the RPC events. +// statsHandleRPC processes the RPC events. 
func (h *ServerHandler) statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { case *stats.Begin, *stats.InHeader, *stats.InTrailer, *stats.OutHeader, *stats.OutTrailer: @@ -66,12 +69,12 @@ func (h *ServerHandler) handleRPCInPayload(ctx context.Context, s *stats.InPaylo d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) if !ok { if grpclog.V(2) { - grpclog.Infoln("serverHandler.handleRPCInPayload failed to retrieve *rpcData from context") + grpclog.Infoln("handleRPCInPayload: failed to retrieve *rpcData from context") } return } - ocstats.Record(ctx, ServerRequestBytes.M(int64(s.Length))) + record(ctx, d, "", ServerRequestBytes.M(int64(s.Length))) atomic.AddInt64(&d.reqCount, 1) } @@ -79,12 +82,12 @@ func (h *ServerHandler) handleRPCOutPayload(ctx context.Context, s *stats.OutPay d, ok := ctx.Value(grpcServerRPCKey).(*rpcData) if !ok { if grpclog.V(2) { - grpclog.Infoln("serverHandler.handleRPCOutPayload failed to retrieve *rpcData from context") + grpclog.Infoln("handleRPCOutPayload: failed to retrieve *rpcData from context") } return } - ocstats.Record(ctx, ServerResponseBytes.M(int64(s.Length))) + record(ctx, d, "", ServerResponseBytes.M(int64(s.Length))) atomic.AddInt64(&d.respCount, 1) } @@ -108,31 +111,27 @@ func (h *ServerHandler) handleRPCEnd(ctx context.Context, s *stats.End) { ServerServerElapsedTime.M(float64(elapsedTime) / float64(time.Millisecond)), } + var st string if s.Error != nil { s, ok := status.FromError(s.Error) if ok { - ctx, _ = tag.New(ctx, - tag.Upsert(KeyStatus, s.Code().String()), - ) + st = s.Code().String() } m = append(m, ServerErrorCount.M(1)) } - - ocstats.Record(ctx, m...) + record(ctx, d, st, m...) } // createTags creates a new tag map containing the tags extracted from the // gRPC metadata. 
-func (h *ServerHandler) createTags(ctx context.Context, fullinfo string) (context.Context, error) { - mods := []tag.Mutator{ - tag.Upsert(KeyMethod, methodName(fullinfo)), +func (h *ServerHandler) createTags(ctx context.Context) (context.Context, error) { + buf := stats.Tags(ctx) + if buf == nil { + return ctx, nil } - if tagsBin := stats.Tags(ctx); tagsBin != nil { - old, err := tag.Decode([]byte(tagsBin)) - if err != nil { - return nil, fmt.Errorf("serverHandler.createTags failed to decode tagsBin %v: %v", tagsBin, err) - } - return tag.New(tag.NewContext(ctx, old), mods...) + propagated, err := tag.Decode(buf) + if err != nil { + return nil, fmt.Errorf("serverHandler.createTags failed to decode: %v", err) } - return tag.New(ctx, mods...) + return tag.NewContext(ctx, propagated), nil } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go index b90ac3e74..85a315eec 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler_test.go @@ -18,6 +18,7 @@ package ocgrpc import ( "testing" + "go.opencensus.io/trace" "golang.org/x/net/context" "go.opencensus.io/stats/view" @@ -54,6 +55,7 @@ func TestServerDefaultCollections(t *testing.T) { rpcs []*rpc wants []*wantData } + tcs := []testCase{ { "1", @@ -296,13 +298,12 @@ func TestServerDefaultCollections(t *testing.T) { } for _, tc := range tcs { - for _, v := range DefaultServerViews { - if err := v.Subscribe(); err != nil { - t.Error(err) - } + if err := view.Subscribe(DefaultServerViews...); err != nil { + t.Fatal(err) } - h := &ServerHandler{NoTrace: true} + h := &ServerHandler{} + h.StartOptions.Sampler = trace.NeverSample() for _, rpc := range tc.rpcs { mods := []tag.Mutator{} for _, t := range rpc.tags { diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 
3068150b2..52b41ac28 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -19,8 +19,10 @@ import ( "strings" "time" + ocstats "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "golang.org/x/net/context" ) type grpcInstrumentationKey string @@ -29,19 +31,23 @@ type grpcInstrumentationKey string // and end of an call. It holds the info that this package needs to keep track // of between the various GRPC events. type rpcData struct { + // reqCount and respCount has to be the first words + // in order to be 64-aligned on 32-bit architectures. + reqCount, respCount int64 // access atomically + // startTime represents the time at which TagRPC was invoked at the // beginning of an RPC. It is an appoximation of the time when the // application code invoked GRPC code. - startTime time.Time - reqCount, respCount int64 // access atomically + startTime time.Time + method string } // The following variables define the default hard-coded auxiliary data used by // both the default GRPC client and GRPC server metrics. 
var ( - DefaultBytesDistribution = view.DistributionAggregation{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} - DefaultMillisecondsDistribution = view.DistributionAggregation{0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000} - DefaultMessageCountDistribution = view.DistributionAggregation{0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536} + DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) ) var ( @@ -58,3 +64,14 @@ var ( func methodName(fullname string) string { return strings.TrimLeft(fullname, "/") } + +func record(ctx context.Context, data *rpcData, status string, m ...ocstats.Measurement) { + mods := []tag.Mutator{ + tag.Upsert(KeyMethod, methodName(data.method)), + } + if status != "" { + mods = append(mods, tag.Upsert(KeyStatus, status)) + } + ctx, _ = tag.New(ctx, mods...) + ocstats.Record(ctx, m...) +} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go index fe086519c..6d6836f02 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go @@ -34,12 +34,14 @@ const traceContextKey = "grpc-trace-bin" // It returns ctx with the new trace span added and a serialization of the // SpanContext added to the outgoing gRPC metadata. 
func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - name := "Sent" + strings.Replace(rti.FullMethodName, "/", ".", -1) - ctx, _ = trace.StartSpan(ctx, name) - traceContextBinary := propagation.Binary(trace.FromContext(ctx).SpanContext()) - if len(traceContextBinary) == 0 { - return ctx - } + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + span := trace.NewSpan(name, trace.FromContext(ctx), trace.StartOptions{ + Sampler: c.StartOptions.Sampler, + SpanKind: trace.SpanKindClient, + }) // span is ended by traceHandleRPC + ctx = trace.WithSpan(ctx, span) + traceContextBinary := propagation.Binary(span.SpanContext()) return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) } @@ -50,35 +52,43 @@ func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) // // It returns ctx, with the new trace span added. func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + opts := trace.StartOptions{ + Sampler: s.StartOptions.Sampler, + SpanKind: trace.SpanKindServer, + } + md, _ := metadata.FromIncomingContext(ctx) - name := "Recv" + strings.Replace(rti.FullMethodName, "/", ".", -1) - if s := md[traceContextKey]; len(s) > 0 { - if parent, ok := propagation.FromBinary([]byte(s[0])); ok { - span := trace.NewSpanWithRemoteParent(name, parent, trace.StartOptions{}) + name := strings.TrimPrefix(rti.FullMethodName, "/") + name = strings.Replace(name, "/", ".", -1) + traceContext := md[traceContextKey] + var ( + parent trace.SpanContext + haveParent bool + ) + if len(traceContext) > 0 { + // Metadata with keys ending in -bin are actually binary. 
They are base64 + // encoded before being put on the wire, see: + // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata + traceContextBinary := []byte(traceContext[0]) + parent, haveParent = propagation.FromBinary(traceContextBinary) + if haveParent && !s.IsPublicEndpoint { + span := trace.NewSpanWithRemoteParent(name, parent, opts) return trace.WithSpan(ctx, span) } } - // TODO(ramonza): should we ignore the in-process parent here? - ctx, _ = trace.StartSpan(ctx, name) - return ctx + span := trace.NewSpan(name, nil, opts) + if haveParent { + span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) + } + return trace.WithSpan(ctx, span) } -// HandleRPC processes the RPC stats, adding information to the current trace span. -func (c *ClientHandler) traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - handleRPC(ctx, rs) -} - -// HandleRPC processes the RPC stats, adding information to the current trace span. -func (s *ServerHandler) traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - handleRPC(ctx, rs) -} - -func handleRPC(ctx context.Context, rs stats.RPCStats) { +func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { span := trace.FromContext(ctx) // TODO: compressed and uncompressed sizes are not populated in every message. 
switch rs := rs.(type) { case *stats.Begin: - span.SetAttributes( + span.AddAttributes( trace.BoolAttribute("Client", rs.Client), trace.BoolAttribute("FailFast", rs.FailFast)) case *stats.InPayload: diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go index 25d0f5f1c..9e590d994 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common_test.go @@ -15,255 +15,32 @@ package ocgrpc import ( - "fmt" - "io" - "net" "testing" - "time" - "go.opencensus.io/plugin/ocgrpc/internal/testpb" "go.opencensus.io/trace" "golang.org/x/net/context" - "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" ) -type testServer struct{} - -func (s *testServer) Single(ctx context.Context, in *testpb.FooRequest) (*testpb.FooResponse, error) { - if in.Fail { - return nil, fmt.Errorf("request failed") +func TestClientHandler_traceTagRPC(t *testing.T) { + ch := &ClientHandler{} + ch.StartOptions.Sampler = trace.AlwaysSample() + rti := &stats.RPCTagInfo{ + FullMethodName: "xxx", } - return &testpb.FooResponse{}, nil -} + ctx := context.Background() + ctx = ch.traceTagRPC(ctx, rti) -func (s *testServer) Multiple(stream testpb.Foo_MultipleServer) error { - for { - in, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - if in.Fail { - return fmt.Errorf("request failed") - } - if err := stream.Send(&testpb.FooResponse{}); err != nil { - return err - } - } -} - -func newTestClientAndServer() (client testpb.FooClient, server *grpc.Server, cleanup func(), err error) { - // initialize server - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, nil, nil, fmt.Errorf("net.Listen: %v", err) - } - server = grpc.NewServer(grpc.StatsHandler(&ServerHandler{NoStats: true})) - testpb.RegisterFooServer(server, &testServer{}) - go server.Serve(listener) - 
- // initialize client - clientConn, err := grpc.Dial(listener.Addr().String(), grpc.WithInsecure(), grpc.WithStatsHandler(&ClientHandler{NoStats: true}), grpc.WithBlock()) - if err != nil { - return nil, nil, nil, fmt.Errorf("grpc.Dial: %v", err) - } - client = testpb.NewFooClient(clientConn) - - cleanup = func() { - server.GracefulStop() - clientConn.Close() - } - - return client, server, cleanup, nil -} - -type testExporter struct { - ch chan *trace.SpanData -} - -func (t *testExporter) ExportSpan(s *trace.SpanData) { - go func() { t.ch <- s }() -} - -func TestStreaming(t *testing.T) { - trace.SetDefaultSampler(trace.AlwaysSample()) - te := testExporter{make(chan *trace.SpanData)} - trace.RegisterExporter(&te) - defer trace.UnregisterExporter(&te) - - client, _, cleanup, err := newTestClientAndServer() - if err != nil { - t.Fatalf("initializing client and server: %v", err) - } - - stream, err := client.Multiple(context.Background()) - if err != nil { - t.Fatalf("Call failed: %v", err) - } - - err = stream.Send(&testpb.FooRequest{}) - if err != nil { - t.Fatalf("Couldn't send streaming request: %v", err) - } - stream.CloseSend() - - for { - _, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - t.Errorf("stream.Recv() = %v; want no errors", err) - } - } - - cleanup() - - s1 := <-te.ch - s2 := <-te.ch - - checkSpanData(t, s1, s2, ".testpb.Foo.Multiple", true) - - select { - case <-te.ch: - t.Fatal("received extra exported spans") - case <-time.After(time.Second / 10): - } -} - -func TestStreamingFail(t *testing.T) { - trace.SetDefaultSampler(trace.AlwaysSample()) - te := testExporter{make(chan *trace.SpanData)} - trace.RegisterExporter(&te) - defer trace.UnregisterExporter(&te) - - client, _, cleanup, err := newTestClientAndServer() - if err != nil { - t.Fatalf("initializing client and server: %v", err) - } - - stream, err := client.Multiple(context.Background()) - if err != nil { - t.Fatalf("Call failed: %v", err) - } - - err = 
stream.Send(&testpb.FooRequest{Fail: true}) - if err != nil { - t.Fatalf("Couldn't send streaming request: %v", err) - } - stream.CloseSend() - - for { - _, err := stream.Recv() - if err == nil || err == io.EOF { - t.Errorf("stream.Recv() = %v; want errors", err) - } else { - break - } - } - - s1 := <-te.ch - s2 := <-te.ch - - checkSpanData(t, s1, s2, ".testpb.Foo.Multiple", false) - cleanup() - - select { - case <-te.ch: - t.Fatal("received extra exported spans") - case <-time.After(time.Second / 10): - } -} - -func TestSingle(t *testing.T) { - trace.SetDefaultSampler(trace.AlwaysSample()) - te := testExporter{make(chan *trace.SpanData)} - trace.RegisterExporter(&te) - defer trace.UnregisterExporter(&te) - - client, _, cleanup, err := newTestClientAndServer() - if err != nil { - t.Fatalf("initializing client and server: %v", err) - } - - _, err = client.Single(context.Background(), &testpb.FooRequest{}) - if err != nil { - t.Fatalf("Couldn't send request: %v", err) - } - - s1 := <-te.ch - s2 := <-te.ch - - checkSpanData(t, s1, s2, ".testpb.Foo.Single", true) - cleanup() - - select { - case <-te.ch: - t.Fatal("received extra exported spans") - case <-time.After(time.Second / 10): - } -} - -func TestSingleFail(t *testing.T) { - trace.SetDefaultSampler(trace.AlwaysSample()) - te := testExporter{make(chan *trace.SpanData)} - trace.RegisterExporter(&te) - defer trace.UnregisterExporter(&te) - - client, _, cleanup, err := newTestClientAndServer() - if err != nil { - t.Fatalf("initializing client and server: %v", err) - } - - _, err = client.Single(context.Background(), &testpb.FooRequest{Fail: true}) - if err == nil { - t.Fatalf("Got nil error from request, want non-nil") - } - - s1 := <-te.ch - s2 := <-te.ch - - checkSpanData(t, s1, s2, ".testpb.Foo.Single", false) - cleanup() - - select { - case <-te.ch: - t.Fatal("received extra exported spans") - case <-time.After(time.Second / 10): - } -} - -func checkSpanData(t *testing.T, s1, s2 *trace.SpanData, methodName 
string, success bool) { - t.Helper() - - if s1.Name < s2.Name { - s1, s2 = s2, s1 - } - - if got, want := s1.Name, "Sent"+methodName; got != want { - t.Errorf("Got name %q want %q", got, want) - } - if got, want := s2.Name, "Recv"+methodName; got != want { - t.Errorf("Got name %q want %q", got, want) - } - if got, want := s2.SpanContext.TraceID, s1.SpanContext.TraceID; got != want { - t.Errorf("Got trace IDs %s and %s, want them equal", got, want) - } - if got, want := s2.ParentSpanID, s1.SpanContext.SpanID; got != want { - t.Errorf("Got ParentSpanID %s, want %s", got, want) - } - if got := (s1.Status.Code == 0); got != success { - t.Errorf("Got success=%t want %t", got, success) - } - if got := (s2.Status.Code == 0); got != success { - t.Errorf("Got success=%t want %t", got, success) - } - if s1.HasRemoteParent { - t.Errorf("Got HasRemoteParent=%t, want false", s1.HasRemoteParent) - } - if !s2.HasRemoteParent { - t.Errorf("Got HasRemoteParent=%t, want true", s2.HasRemoteParent) + span := trace.FromContext(ctx) + if span == nil { + t.Fatal("expected span, got nil") + } + if !span.IsRecordingEvents() { + t.Errorf("span should be sampled") + } + md, ok := metadata.FromOutgoingContext(ctx) + if !ok || len(md) == 0 || len(md[traceContextKey]) == 0 { + t.Fatal("no metadata") } } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_test.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_test.go new file mode 100644 index 000000000..7cd1b2e52 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ocgrpc/trace_test.go @@ -0,0 +1,233 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocgrpc_test + +import ( + "io" + "testing" + "time" + + "go.opencensus.io/internal/testpb" + "go.opencensus.io/trace" + "golang.org/x/net/context" +) + +type testExporter struct { + ch chan *trace.SpanData +} + +func (t *testExporter) ExportSpan(s *trace.SpanData) { + go func() { t.ch <- s }() +} + +func TestStreaming(t *testing.T) { + trace.SetDefaultSampler(trace.AlwaysSample()) + te := testExporter{make(chan *trace.SpanData)} + trace.RegisterExporter(&te) + defer trace.UnregisterExporter(&te) + + client, cleanup := testpb.NewTestClient(t) + + stream, err := client.Multiple(context.Background()) + if err != nil { + t.Fatalf("Call failed: %v", err) + } + + err = stream.Send(&testpb.FooRequest{}) + if err != nil { + t.Fatalf("Couldn't send streaming request: %v", err) + } + stream.CloseSend() + + for { + _, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Errorf("stream.Recv() = %v; want no errors", err) + } + } + + cleanup() + + s1 := <-te.ch + s2 := <-te.ch + + checkSpanData(t, s1, s2, "testpb.Foo.Multiple", true) + + select { + case <-te.ch: + t.Fatal("received extra exported spans") + case <-time.After(time.Second / 10): + } +} + +func TestStreamingFail(t *testing.T) { + trace.SetDefaultSampler(trace.AlwaysSample()) + te := testExporter{make(chan *trace.SpanData)} + trace.RegisterExporter(&te) + defer trace.UnregisterExporter(&te) + + client, cleanup := testpb.NewTestClient(t) + + stream, err := client.Multiple(context.Background()) + if err != nil { + t.Fatalf("Call failed: %v", err) + } + + err = 
stream.Send(&testpb.FooRequest{Fail: true}) + if err != nil { + t.Fatalf("Couldn't send streaming request: %v", err) + } + stream.CloseSend() + + for { + _, err := stream.Recv() + if err == nil || err == io.EOF { + t.Errorf("stream.Recv() = %v; want errors", err) + } else { + break + } + } + + s1 := <-te.ch + s2 := <-te.ch + + checkSpanData(t, s1, s2, "testpb.Foo.Multiple", false) + cleanup() + + select { + case <-te.ch: + t.Fatal("received extra exported spans") + case <-time.After(time.Second / 10): + } +} + +func TestSingle(t *testing.T) { + trace.SetDefaultSampler(trace.AlwaysSample()) + te := testExporter{make(chan *trace.SpanData)} + trace.RegisterExporter(&te) + defer trace.UnregisterExporter(&te) + + client, cleanup := testpb.NewTestClient(t) + + _, err := client.Single(context.Background(), &testpb.FooRequest{}) + if err != nil { + t.Fatalf("Couldn't send request: %v", err) + } + + s1 := <-te.ch + s2 := <-te.ch + + checkSpanData(t, s1, s2, "testpb.Foo.Single", true) + cleanup() + + select { + case <-te.ch: + t.Fatal("received extra exported spans") + case <-time.After(time.Second / 10): + } +} + +func TestServerSpanDuration(t *testing.T) { + client, cleanup := testpb.NewTestClient(t) + defer cleanup() + + te := testExporter{make(chan *trace.SpanData, 100)} + trace.RegisterExporter(&te) + defer trace.UnregisterExporter(&te) + + trace.SetDefaultSampler(trace.AlwaysSample()) + + ctx := context.Background() + const sleep = 100 * time.Millisecond + client.Single(ctx, &testpb.FooRequest{SleepNanos: int64(sleep)}) + +loop: + for { + select { + case span := <-te.ch: + if span.SpanKind != trace.SpanKindServer { + continue loop + } + if got, want := span.EndTime.Sub(span.StartTime), sleep; got < want { + t.Errorf("span duration = %dns; want at least %dns", got, want) + } + break loop + default: + t.Fatal("no more spans") + } + } +} + +func TestSingleFail(t *testing.T) { + trace.SetDefaultSampler(trace.AlwaysSample()) + te := testExporter{make(chan *trace.SpanData)} 
+ trace.RegisterExporter(&te) + defer trace.UnregisterExporter(&te) + + client, cleanup := testpb.NewTestClient(t) + + _, err := client.Single(context.Background(), &testpb.FooRequest{Fail: true}) + if err == nil { + t.Fatalf("Got nil error from request, want non-nil") + } + + s1 := <-te.ch + s2 := <-te.ch + + checkSpanData(t, s1, s2, "testpb.Foo.Single", false) + cleanup() + + select { + case <-te.ch: + t.Fatal("received extra exported spans") + case <-time.After(time.Second / 10): + } +} + +func checkSpanData(t *testing.T, s1, s2 *trace.SpanData, methodName string, success bool) { + t.Helper() + + if s1.SpanKind == trace.SpanKindServer { + s1, s2 = s2, s1 + } + + if got, want := s1.Name, methodName; got != want { + t.Errorf("Got name %q want %q", got, want) + } + if got, want := s2.Name, methodName; got != want { + t.Errorf("Got name %q want %q", got, want) + } + if got, want := s2.SpanContext.TraceID, s1.SpanContext.TraceID; got != want { + t.Errorf("Got trace IDs %s and %s, want them equal", got, want) + } + if got, want := s2.ParentSpanID, s1.SpanContext.SpanID; got != want { + t.Errorf("Got ParentSpanID %s, want %s", got, want) + } + if got := (s1.Status.Code == 0); got != success { + t.Errorf("Got success=%t want %t", got, success) + } + if got := (s2.Status.Code == 0); got != success { + t.Errorf("Got success=%t want %t", got, success) + } + if s1.HasRemoteParent { + t.Errorf("Got HasRemoteParent=%t, want false", s1.HasRemoteParent) + } + if !s2.HasRemoteParent { + t.Errorf("Got HasRemoteParent=%t, want true", s2.HasRemoteParent) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go index 2be6beefc..37f42b3b1 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -32,18 +32,15 @@ type Transport struct { // the returned round tripper will be cancelable. Base http.RoundTripper - // NoStats may be set to disable recording of stats. 
- NoStats bool - - // NoTrace may be set to disable recording of traces. - NoTrace bool - // Propagation defines how traces are propagated. If unspecified, a default // (currently B3 format) will be used. Propagation propagation.HTTPFormat // StartOptions are applied to the span started by this Transport around each // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. StartOptions trace.StartOptions // TODO: Implement tag propagation for HTTP. @@ -53,22 +50,19 @@ type Transport struct { func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base() // TODO: remove excessive nesting of http.RoundTrippers here. - if !t.NoTrace { - format := t.Propagation - if format == nil { - format = defaultFormat - } - rt = &traceTransport{ - base: rt, - format: format, - startOptions: t.StartOptions, - } + format := t.Propagation + if format == nil { + format = defaultFormat } - if !t.NoStats { - rt = statsTransport{ - base: rt, - } + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: t.StartOptions.Sampler, + SpanKind: trace.SpanKindClient, + }, } + rt = statsTransport{base: rt} return rt.RoundTrip(req) } diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_test.go b/vendor/go.opencensus.io/plugin/ochttp/client_test.go index 1df1c57df..a1fb2f71d 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client_test.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client_test.go @@ -36,7 +36,7 @@ func TestClient(t *testing.T) { })) defer server.Close() - for _, v := range ochttp.DefaultViews { + for _, v := range ochttp.DefaultClientViews { v.Subscribe() } @@ -123,19 +123,13 @@ func TestClient(t *testing.T) { } } -func BenchmarkTransportNoInstrumentation(b *testing.B) { - benchmarkClientServer(b, &ochttp.Transport{NoStats: true, NoTrace: true}) +var noTrace = trace.StartOptions{Sampler: trace.NeverSample()} + +func 
BenchmarkTransportNoTrace(b *testing.B) { + benchmarkClientServer(b, &ochttp.Transport{StartOptions: noTrace}) } -func BenchmarkTransportTraceOnly(b *testing.B) { - benchmarkClientServer(b, &ochttp.Transport{NoStats: true}) -} - -func BenchmarkTransportStatsOnly(b *testing.B) { - benchmarkClientServer(b, &ochttp.Transport{NoTrace: true}) -} - -func BenchmarkTransportAllInstrumentation(b *testing.B) { +func BenchmarkTransport(b *testing.B) { benchmarkClientServer(b, &ochttp.Transport{}) } @@ -167,19 +161,11 @@ func benchmarkClientServer(b *testing.B, transport *ochttp.Transport) { } } -func BenchmarkTransportParallel64NoInstrumentation(b *testing.B) { - benchmarkClientServerParallel(b, 64, &ochttp.Transport{NoTrace: true, NoStats: true}) +func BenchmarkTransportParallel64NoTrace(b *testing.B) { + benchmarkClientServerParallel(b, 64, &ochttp.Transport{StartOptions: noTrace}) } -func BenchmarkTransportParallel64TraceOnly(b *testing.B) { - benchmarkClientServerParallel(b, 64, &ochttp.Transport{NoStats: true}) -} - -func BenchmarkTransportParallel64StatsOnly(b *testing.B) { - benchmarkClientServerParallel(b, 64, &ochttp.Transport{NoTrace: true}) -} - -func BenchmarkTransportParallel64AllInstrumentation(b *testing.B) { +func BenchmarkTransportParallel64(b *testing.B) { benchmarkClientServerParallel(b, 64, &ochttp.Transport{}) } diff --git a/vendor/go.opencensus.io/plugin/ochttp/example_test.go b/vendor/go.opencensus.io/plugin/ochttp/example_test.go index fa4dd9370..a92cf02c0 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/example_test.go +++ b/vendor/go.opencensus.io/plugin/ochttp/example_test.go @@ -25,20 +25,20 @@ import ( ) func ExampleTransport() { - err := view.Subscribe( - // Subscribe to a few default views, with renaming + if err := view.Subscribe( + // Subscribe to a few default views. ochttp.ClientRequestCountByMethod, ochttp.ClientResponseCountByStatusCode, ochttp.ClientLatencyView, - // Subscribe to a custom view + + // Subscribe to a custom view. 
&view.View{ Name: "httpclient_latency_by_hostpath", TagKeys: []tag.Key{ochttp.Host, ochttp.Path}, Measure: ochttp.ClientLatency, Aggregation: ochttp.DefaultLatencyDistribution, }, - ) - if err != nil { + ); err != nil { log.Fatal(err) } diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go deleted file mode 100644 index f73bb803a..000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package google is deprecated: Use go.opencensus.io/exporter/stackdriver/propagation. -package google // import "go.opencensus.io/plugin/ochttp/propagation/google" - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" -) - -const ( - httpHeaderMaxSize = 200 - httpHeader = `X-Cloud-Trace-Context` -) - -// Deprecated: Use go.opencensus.io/exporter/stackdriver/propagation.HTTPFormat -type HTTPFormat struct{} - -// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(httpHeader) - // See https://cloud.google.com/trace/docs/faq for the header format. 
- // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(httpHeader, header) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google_test.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google_test.go deleted file mode 100644 index fed05fe44..000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/google/google_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package google - -import ( - "net/http" - "reflect" - "testing" - - "go.opencensus.io/trace" -) - -func TestHTTPFormat(t *testing.T) { - format := &HTTPFormat{} - - traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} - spanID1 := [8]byte{255, 0, 0, 0, 0, 0, 0, 123} - spanID2 := [8]byte{0, 0, 0, 0, 0, 0, 0, 123} - tests := []struct { - incoming string - wantSpanContext trace.SpanContext - }{ - { - incoming: "105445aa7843bc8bf206b12000100000/18374686479671623803;o=1", - wantSpanContext: trace.SpanContext{ - TraceID: traceID, - SpanID: spanID1, - TraceOptions: 1, - }, - }, - { - incoming: "105445aa7843bc8bf206b12000100000/123;o=0", - wantSpanContext: trace.SpanContext{ - TraceID: traceID, - SpanID: spanID2, - TraceOptions: 0, - }, - }, - } - for _, tt := range tests { - t.Run(tt.incoming, func(t *testing.T) { - req, _ := http.NewRequest("GET", "http://example.com", nil) - req.Header.Add(httpHeader, tt.incoming) - sc, ok := format.SpanContextFromRequest(req) - if !ok { - t.Errorf("exporter.SpanContextFromRequest() = false; want true") - } - if got, want := sc, tt.wantSpanContext; !reflect.DeepEqual(got, want) { - t.Errorf("exporter.SpanContextFromRequest() returned span context %v; want %v", got, want) - } - - req, _ = http.NewRequest("GET", "http://example.com", nil) - format.SpanContextToRequest(sc, req) - if got, want := req.Header.Get(httpHeader), tt.incoming; got != want { - t.Errorf("exporter.SpanContextToRequest() returned header %q; want %q", got, want) - } - }) - } -} diff --git 
a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index 1c95d1466..b402a65f3 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -38,12 +38,6 @@ import ( // // Incoming propagation mechanism is determined by the given HTTP propagators. type Handler struct { - // NoStats may be set to disable recording of stats. - NoStats bool - - // NoTrace may be set to disable recording of traces. - NoTrace bool - // Propagation defines how traces are propagated. If unspecified, // B3 propagation will be used. Propagation propagation.HTTPFormat @@ -53,20 +47,24 @@ type Handler struct { // StartOptions are applied to the span started by this Handler around each // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. StartOptions trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. 
+ IsPublicEndpoint bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !h.NoTrace { - var end func() - r, end = h.startTrace(w, r) - defer end() - } - if !h.NoStats { - var end func() - w, end = h.startStats(w, r) - defer end() - } + var traceEnd, statsEnd func() + r, traceEnd = h.startTrace(w, r) + defer traceEnd() + w, statsEnd = h.startStats(w, r) + defer statsEnd() handler := h.Handler if handler == nil { @@ -76,23 +74,41 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - name := spanNameFromURL("Recv", r.URL) - p := h.Propagation - if p == nil { - p = defaultFormat + opts := trace.StartOptions{ + Sampler: h.StartOptions.Sampler, + SpanKind: trace.SpanKindServer, } + + name := spanNameFromURL(r.URL) ctx := r.Context() var span *trace.Span - if sc, ok := p.SpanContextFromRequest(r); ok { - span = trace.NewSpanWithRemoteParent(name, sc, h.StartOptions) + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + span = trace.NewSpanWithRemoteParent(name, sc, opts) + ctx = trace.WithSpan(ctx, span) } else { - span = trace.NewSpan(name, nil, h.StartOptions) + span = trace.NewSpan(name, nil, opts) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeChild, + Attributes: nil, + }) + } } ctx = trace.WithSpan(ctx, span) - span.SetAttributes(requestAttrs(r)...) + span.AddAttributes(requestAttrs(r)...) 
return r.WithContext(trace.WithSpan(r.Context(), span)), span.End } +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) { ctx, _ := tag.New(r.Context(), tag.Upsert(Host, r.URL.Host), diff --git a/vendor/go.opencensus.io/plugin/ochttp/server_test.go b/vendor/go.opencensus.io/plugin/ochttp/server_test.go index 70c4f2de8..bdfe8b0b0 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server_test.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server_test.go @@ -7,6 +7,7 @@ import ( "testing" "go.opencensus.io/stats/view" + "go.opencensus.io/trace" ) func httpHandler(statusCode, respSize int) http.Handler { @@ -25,7 +26,7 @@ func updateMean(mean float64, sample, count int) float64 { } func TestHandlerStatsCollection(t *testing.T) { - for _, v := range DefaultViews { + for _, v := range DefaultServerViews { v.Subscribe() } @@ -53,9 +54,9 @@ func TestHandlerStatsCollection(t *testing.T) { r := httptest.NewRequest(test.method, test.target, body) w := httptest.NewRecorder() h := &Handler{ - NoTrace: true, Handler: httpHandler(test.statusCode, test.respSize), } + h.StartOptions.Sampler = trace.NeverSample() for i := 0; i < test.count; i++ { h.ServeHTTP(w, r) diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 02e7f68dd..02858a5f8 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -56,8 +56,8 @@ var ( // Default distributions used by views in this package. 
var ( - DefaultSizeDistribution = view.DistributionAggregation{0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} - DefaultLatencyDistribution = view.DistributionAggregation{0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000} + DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) ) // Package ochttp provides some convenience views. @@ -67,7 +67,7 @@ var ( Name: "opencensus.io/http/client/request_count", Description: "Count of HTTP requests started", Measure: ClientRequestCount, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } ClientRequestBytesView = &view.View{ @@ -96,7 +96,7 @@ var ( Description: "Client request count by HTTP method", TagKeys: []tag.Key{Method}, Measure: ClientRequestCount, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } ClientResponseCountByStatusCode = &view.View{ @@ -104,14 +104,14 @@ var ( Description: "Client response count by status code", TagKeys: []tag.Key{StatusCode}, Measure: ClientLatency, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", Description: "Count of HTTP requests started", Measure: ServerRequestCount, - Aggregation: view.CountAggregation{}, + Aggregation: view.Count(), } ServerRequestBytesView = &view.View{ @@ -140,7 +140,7 @@ var ( Description: "Server request count by HTTP method", TagKeys: []tag.Key{Method}, Measure: ServerRequestCount, - Aggregation: view.CountAggregation{}, + Aggregation: 
view.Count(), } ServerResponseCountByStatusCode = &view.View{ @@ -148,21 +148,26 @@ var ( Description: "Server response count by status code", TagKeys: []tag.Key{StatusCode}, Measure: ServerLatency, - Aggregation: view.CountAggregation{}, - } - - DefaultViews = []*view.View{ - ClientRequestCountView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientLatencyView, - ClientRequestCountByMethod, - ClientResponseCountByStatusCode, - ServerRequestCountView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerLatencyView, - ServerRequestCountByMethod, - ServerResponseCountByStatusCode, + Aggregation: view.Count(), } ) + +// DefaultClientViews are the default client views provided by this package. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index 70f18ac78..10d4a7060 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -18,7 +18,6 @@ import ( "io" "net/http" "net/url" - "sync" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" @@ -51,7 +50,7 @@ type traceTransport struct { // The created span can follow a parent span, if a parent is presented in // the request's context. func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := spanNameFromURL("Sent", req.URL) + name := spanNameFromURL(req.URL) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. 
parent := trace.FromContext(req.Context()) @@ -62,7 +61,7 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { t.format.SpanContextToRequest(span.SpanContext(), req) } - span.SetAttributes(requestAttrs(req)...) + span.AddAttributes(requestAttrs(req)...) resp, err := t.base.RoundTrip(req) if err != nil { span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) @@ -70,38 +69,37 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { return resp, err } - span.SetAttributes(responseAttrs(resp)...) + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(status(resp.StatusCode)) // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. - resp.Body = &spanEndBody{rc: resp.Body, span: span} + resp.Body = &bodyTracker{rc: resp.Body, span: span} return resp, err } -// spanEndBody wraps a response.Body and invokes +// bodyTracker wraps a response.Body and invokes // trace.EndSpan on encountering io.EOF on reading // the body of the original response. -type spanEndBody struct { +type bodyTracker struct { rc io.ReadCloser span *trace.Span - - endSpanOnce sync.Once } -var _ io.ReadCloser = (*spanEndBody)(nil) +var _ io.ReadCloser = (*bodyTracker)(nil) -func (seb *spanEndBody) Read(b []byte) (int, error) { - n, err := seb.rc.Read(b) +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) switch err { case nil: return n, nil case io.EOF: - seb.endSpan() + bt.span.End() default: // For all other errors, set the span status - seb.span.SetStatus(trace.Status{ + bt.span.SetStatus(trace.Status{ // Code 2 is the error code for Internal server error. 
Code: 2, Message: err.Error(), @@ -110,19 +108,12 @@ func (seb *spanEndBody) Read(b []byte) (int, error) { return n, err } -// endSpan invokes trace.EndSpan exactly once -func (seb *spanEndBody) endSpan() { - seb.endSpanOnce.Do(func() { - seb.span.End() - }) -} - -func (seb *spanEndBody) Close() error { +func (bt *bodyTracker) Close() error { // Invoking endSpan on Close will help catch the cases // in which a read returned a non-nil error, we set the // span status but didn't end the span. - seb.endSpan() - return seb.rc.Close() + bt.span.End() + return bt.rc.Close() } // CancelRequest cancels an in-flight request by closing its connection. @@ -135,13 +126,8 @@ func (t *traceTransport) CancelRequest(req *http.Request) { } } -func spanNameFromURL(prefix string, u *url.URL) string { - host := u.Hostname() - port := ":" + u.Port() - if port == ":" || port == ":80" || port == ":443" { - port = "" - } - return prefix + "." + host + port + u.Path +func spanNameFromURL(u *url.URL) string { + return u.Path } func requestAttrs(r *http.Request) []trace.Attribute { @@ -158,3 +144,72 @@ func responseAttrs(resp *http.Response) []trace.Attribute { trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), } } + +func status(statusCode int) trace.Status { + var code int32 + if statusCode < 200 || statusCode >= 400 { + code = codeUnknown + } + switch statusCode { + case 499: + code = codeCancelled + case http.StatusBadRequest: + code = codeInvalidArgument + case http.StatusGatewayTimeout: + code = codeDeadlineExceeded + case http.StatusNotFound: + code = codeNotFound + case http.StatusForbidden: + code = codePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. 
+ code = codeUnathenticated + case http.StatusTooManyRequests: + code = codeResourceExhausted + case http.StatusNotImplemented: + code = codeUnimplemented + case http.StatusServiceUnavailable: + code = codeUnavailable + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +// TODO(jbd): Provide status codes from trace package. +const ( + codeOK = 0 + codeCancelled = 1 + codeUnknown = 2 + codeInvalidArgument = 3 + codeDeadlineExceeded = 4 + codeNotFound = 5 + codeAlreadyExists = 6 + codePermissionDenied = 7 + codeResourceExhausted = 8 + codeFailedPrecondition = 9 + codeAborted = 10 + codeOutOfRange = 11 + codeUnimplemented = 12 + codeInternal = 13 + codeUnavailable = 14 + codeDataLoss = 15 + codeUnathenticated = 16 +) + +var codeToStr = map[int32]string{ + codeOK: `"OK"`, + codeCancelled: `"CANCELLED"`, + codeUnknown: `"UNKNOWN"`, + codeInvalidArgument: `"INVALID_ARGUMENT"`, + codeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, + codeNotFound: `"NOT_FOUND"`, + codeAlreadyExists: `"ALREADY_EXISTS"`, + codePermissionDenied: `"PERMISSION_DENIED"`, + codeResourceExhausted: `"RESOURCE_EXHAUSTED"`, + codeFailedPrecondition: `"FAILED_PRECONDITION"`, + codeAborted: `"ABORTED"`, + codeOutOfRange: `"OUT_OF_RANGE"`, + codeUnimplemented: `"UNIMPLEMENTED"`, + codeInternal: `"INTERNAL"`, + codeUnavailable: `"UNAVAILABLE"`, + codeDataLoss: `"DATA_LOSS"`, + codeUnathenticated: `"UNAUTHENTICATED"`, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace_test.go b/vendor/go.opencensus.io/plugin/ochttp/trace_test.go index 7cc843e59..cfad50c3c 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace_test.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace_test.go @@ -31,6 +31,8 @@ import ( "testing" "time" + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" "go.opencensus.io/trace" ) @@ -91,7 +93,6 @@ func TestTransport_RoundTrip(t *testing.T) { transport := &testTransport{ch: make(chan *http.Request, 1)} rt := 
&Transport{ - NoStats: true, Propagation: &testPropagator{}, Base: transport, } @@ -141,8 +142,6 @@ func TestHandler(t *testing.T) { for _, tt := range tests { t.Run(tt.header, func(t *testing.T) { - propagator := &testPropagator{} - handler := &Handler{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.FromContext(r.Context()) @@ -155,7 +154,7 @@ func TestHandler(t *testing.T) { } }), StartOptions: trace.StartOptions{Sampler: trace.ProbabilitySampler(0.0)}, - Propagation: propagator, + Propagation: &testPropagator{}, } req, _ := http.NewRequest("GET", "http://foo.com", nil) req.Header.Add("trace", tt.header) @@ -173,116 +172,169 @@ func (c *collector) ExportSpan(s *trace.SpanData) { } func TestEndToEnd(t *testing.T) { - var spans collector - trace.RegisterExporter(&spans) - defer trace.UnregisterExporter(&spans) + trace.SetDefaultSampler(trace.AlwaysSample()) - span := trace.NewSpan( - "top-level", - nil, - trace.StartOptions{ - Sampler: trace.AlwaysSample(), + tc := []struct { + name string + handler *Handler + transport *Transport + wantSameTraceID bool + wantLinks bool // expect a link between client and server span + }{ + { + name: "internal default propagation", + handler: &Handler{}, + transport: &Transport{}, + wantSameTraceID: true, + }, + { + name: "external default propagation", + handler: &Handler{IsPublicEndpoint: true}, + transport: &Transport{}, + wantSameTraceID: false, + wantLinks: true, + }, + { + name: "internal TraceContext propagation", + handler: &Handler{Propagation: &tracecontext.HTTPFormat{}}, + transport: &Transport{Propagation: &tracecontext.HTTPFormat{}}, + wantSameTraceID: true, + }, + { + name: "misconfigured propagation", + handler: &Handler{IsPublicEndpoint: true, Propagation: &tracecontext.HTTPFormat{}}, + transport: &Transport{Propagation: &b3.HTTPFormat{}}, + wantSameTraceID: false, + wantLinks: false, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + var spans 
collector + trace.RegisterExporter(&spans) + defer trace.UnregisterExporter(&spans) + + // Start the server. + serverDone := make(chan struct{}) + serverReturn := make(chan time.Time) + url := serveHTTP(tt.handler, serverDone, serverReturn) + + // Start a root Span in the client. + root := trace.NewSpan( + "top-level", + nil, + trace.StartOptions{}) + ctx := trace.WithSpan(context.Background(), root) + + // Make the request. + req, err := http.NewRequest( + http.MethodPost, + fmt.Sprintf("%s/example/url/path?qparam=val", url), + strings.NewReader("expected-request-body")) + if err != nil { + t.Fatal(err) + } + req = req.WithContext(ctx) + resp, err := tt.transport.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("resp.StatusCode = %d", resp.StatusCode) + } + + // Tell the server to return from request handling. + serverReturn <- time.Now().Add(time.Millisecond) + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if got, want := string(respBody), "expected-response"; got != want { + t.Fatalf("respBody = %q; want %q", got, want) + } + + resp.Body.Close() + + <-serverDone + trace.UnregisterExporter(&spans) + + if got, want := len(spans), 2; got != want { + t.Fatalf("len(spans) = %d; want %d", got, want) + } + + var client, server *trace.SpanData + for _, sp := range spans { + switch sp.SpanKind { + case trace.SpanKindClient: + client = sp + if got, want := client.Name, "/example/url/path"; got != want { + t.Errorf("Span name: %q; want %q", got, want) + } + case trace.SpanKindServer: + server = sp + if got, want := server.Name, "/example/url/path"; got != want { + t.Errorf("Span name: %q; want %q", got, want) + } + default: + t.Fatalf("server or client span missing") + } + } + + if tt.wantSameTraceID { + if server.TraceID != client.TraceID { + t.Errorf("TraceID does not match: server.TraceID=%q client.TraceID=%q", server.TraceID, client.TraceID) + } + if !server.HasRemoteParent { + 
t.Errorf("server span should have remote parent") + } + if server.ParentSpanID != client.SpanID { + t.Errorf("server span should have client span as parent") + } + } + if !tt.wantSameTraceID { + if server.TraceID == client.TraceID { + t.Errorf("TraceID should not be trusted") + } + } + if tt.wantLinks { + if got, want := len(server.Links), 1; got != want { + t.Errorf("len(server.Links) = %d; want %d", got, want) + } else { + link := server.Links[0] + if got, want := link.TraceID, root.SpanContext().TraceID; got != want { + t.Errorf("link.TraceID = %q; want %q", got, want) + } + if got, want := link.Type, trace.LinkTypeChild; got != want { + t.Errorf("link.Type = %v; want %v", got, want) + } + } + } + if server.StartTime.Before(client.StartTime) { + t.Errorf("server span starts before client span") + } + if server.EndTime.After(client.EndTime) { + t.Errorf("client span ends before server span") + } }) - ctx := trace.WithSpan(context.Background(), span) - - serverDone := make(chan struct{}) - serverReturn := make(chan time.Time) - url := serveHTTP(serverDone, serverReturn) - - req, err := http.NewRequest( - "POST", - fmt.Sprintf("%s/example/url/path?qparam=val", url), - strings.NewReader("expected-request-body")) - if err != nil { - t.Fatalf("unexpected error %#v", err) - } - req = req.WithContext(ctx) - - rt := &Transport{ - NoStats: true, - Propagation: defaultFormat, - Base: http.DefaultTransport, - } - resp, err := rt.RoundTrip(req) - if err != nil { - t.Fatalf("unexpected error %s", err) - } - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected stats: %d", resp.StatusCode) - } - - serverReturn <- time.Now().Add(time.Millisecond) - - respBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("unexpected read error: %#v", err) - } - if string(respBody) != "expected-response" { - t.Fatalf("unexpected response: %s", string(respBody)) - } - - resp.Body.Close() - - <-serverDone - trace.UnregisterExporter(&spans) - - if got, want := len(spans), 2; 
got != want { - t.Fatalf("len(%#v) = %d; want %d", spans, got, want) - } - - var client, server *trace.SpanData - for _, sp := range spans { - if strings.HasPrefix(sp.Name, "Sent.") { - client = sp - serverHostport := req.URL.Hostname() + ":" + req.URL.Port() - if got, want := client.Name, "Sent."+serverHostport+"/example/url/path"; got != want { - t.Errorf("Span name: %q; want %q", got, want) - } - } else if strings.HasPrefix(sp.Name, "Recv.") { - server = sp - if got, want := server.Name, "Recv./example/url/path"; got != want { - t.Errorf("Span name: %q; want %q", got, want) - } - } - } - - if server == nil || client == nil { - t.Fatalf("server or client span missing") - } - if server.TraceID != client.TraceID { - t.Errorf("TraceID does not match: server.TraceID=%q client.TraceID=%q", server.TraceID, client.TraceID) - } - if server.StartTime.Before(client.StartTime) { - t.Errorf("server span starts before client span") - } - if server.EndTime.After(client.EndTime) { - t.Errorf("client span ends before server span") - } - if !server.HasRemoteParent { - t.Errorf("server span should have remote parent") - } - if server.ParentSpanID != client.SpanID { - t.Errorf("server span should have client span as parent") } } -func serveHTTP(done chan struct{}, wait chan time.Time) string { - handler := &Handler{ - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - w.(http.Flusher).Flush() +func serveHTTP(handler *Handler, done chan struct{}, wait chan time.Time) string { + handler.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(200) + w.(http.Flusher).Flush() - // simulate a slow-responding server - sleepUntil := <-wait - for time.Now().Before(sleepUntil) { - time.Sleep(sleepUntil.Sub(time.Now())) - } - - io.WriteString(w, "expected-response") - close(done) - }), - } + // Simulate a slow-responding server. 
+ sleepUntil := <-wait + for time.Now().Before(sleepUntil) { + time.Sleep(sleepUntil.Sub(time.Now())) + } + io.WriteString(w, "expected-response") + close(done) + }) server := httptest.NewServer(handler) go func() { <-done @@ -293,38 +345,25 @@ func serveHTTP(done chan struct{}, wait chan time.Time) string { func TestSpanNameFromURL(t *testing.T) { tests := []struct { - prefix string - u string - want string + u string + want string }{ { - prefix: "Sent", - u: "http://localhost:80/hello?q=a", - want: "Sent.localhost/hello", + u: "http://localhost:80/hello?q=a", + want: "/hello", }, { - prefix: "Recv", - u: "https://localhost:443/a", - want: "Recv.localhost/a", - }, - { - prefix: "Recv", - u: "https://example.com:7654/a", - want: "Recv.example.com:7654/a", - }, - { - prefix: "Sent", - u: "/a/b?q=c", - want: "Sent./a/b", + u: "/a/b?q=c", + want: "/a/b", }, } for _, tt := range tests { - t.Run(tt.prefix+"-"+tt.u, func(t *testing.T) { + t.Run(tt.u, func(t *testing.T) { u, err := url.Parse(tt.u) if err != nil { t.Errorf("url.Parse() = %v", err) } - if got := spanNameFromURL(tt.prefix, u); got != tt.want { + if got := spanNameFromURL(u); got != tt.want { t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) } }) @@ -340,13 +379,13 @@ func TestRequestAttributes(t *testing.T) { { name: "GET example.com/hello", makeReq: func() *http.Request { - req, _ := http.NewRequest("GET", "http://example.com/hello", nil) + req, _ := http.NewRequest("GET", "http://example.com:779/hello", nil) req.Header.Add("User-Agent", "ua") return req }, wantAttrs: []trace.Attribute{ trace.StringAttribute("http.path", "/hello"), - trace.StringAttribute("http.host", "example.com"), + trace.StringAttribute("http.host", "example.com:779"), trace.StringAttribute("http.method", "GET"), trace.StringAttribute("http.user_agent", "ua"), }, @@ -394,3 +433,27 @@ func TestResponseAttributes(t *testing.T) { }) } } + +func TestStatusUnitTest(t *testing.T) { + tests := []struct { + in int + want trace.Status + 
}{ + {200, trace.Status{Code: 0, Message: `"OK"`}}, + {100, trace.Status{Code: 2, Message: `"UNKNOWN"`}}, + {500, trace.Status{Code: 2, Message: `"UNKNOWN"`}}, + {404, trace.Status{Code: 5, Message: `"NOT_FOUND"`}}, + {600, trace.Status{Code: 2, Message: `"UNKNOWN"`}}, + {401, trace.Status{Code: 16, Message: `"UNAUTHENTICATED"`}}, + {403, trace.Status{Code: 7, Message: `"PERMISSION_DENIED"`}}, + {301, trace.Status{Code: 0, Message: `"OK"`}}, + {501, trace.Status{Code: 12, Message: `"UNIMPLEMENTED"`}}, + } + + for _, tt := range tests { + got, want := status(tt.in), tt.want + if got != want { + t.Errorf("status(%d) got = (%#v) want = (%#v)", tt.in, got, want) + } + } +} diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 13d1c02b9..6341eb2ad 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -18,7 +18,8 @@ import ( "go.opencensus.io/tag" ) -type Recorder func(*tag.Map, interface{}) - // DefaultRecorder will be called for each Record call. -var DefaultRecorder Recorder = nil +var DefaultRecorder func(*tag.Map, interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. 
+var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go index 5c575daee..ad00aff2a 100644 --- a/vendor/go.opencensus.io/stats/measure.go +++ b/vendor/go.opencensus.io/stats/measure.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "go.opencensus.io/stats/internal" ) @@ -37,14 +38,27 @@ type Measure interface { Name() string Description() string Unit() string + + subscribe() + subscribed() bool } type measure struct { + subs int32 // access atomically + name string description string unit string } +func (m *measure) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measure) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + // Name returns the name of the measure. func (m *measure) Name() string { return m.name @@ -61,10 +75,12 @@ func (m *measure) Unit() string { } var ( - mu sync.RWMutex - measures = make(map[string]Measure) - errDuplicate = errors.New("duplicate measure name") + mu sync.RWMutex + measures = make(map[string]Measure) +) +var ( + errDuplicate = errors.New("duplicate measure name") errMeasureNameTooLong = fmt.Errorf("measure name cannot be longer than %v", internal.MaxNameLength) ) diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go index 9f1bc6841..7009ceea0 100644 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -20,9 +20,20 @@ type Float64Measure struct { measure } +func (m *Float64Measure) subscribe() { + m.measure.subscribe() +} + +func (m *Float64Measure) subscribed() bool { + return m.measure.subscribed() +} + // M creates a new float64 measurement. // Use Record to record measurements. 
func (m *Float64Measure) M(v float64) Measurement { + if !m.subscribed() { + return Measurement{} + } return Measurement{m: m, v: v} } diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go index 5ff3bcd1b..5b820b6a6 100644 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -20,9 +20,20 @@ type Int64Measure struct { measure } +func (m *Int64Measure) subscribe() { + m.measure.subscribe() +} + +func (m *Int64Measure) subscribed() bool { + return m.measure.subscribed() +} + // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { + if !m.subscribed() { + return Measurement{} + } return Measurement{m: m, v: float64(v)} } diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 791d35ef2..98865ff69 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -22,9 +22,30 @@ import ( "go.opencensus.io/tag" ) +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + // Record records one or multiple measurements with the same tags at once. // If there are any tags in the context, measurements will be tagged with them. 
func Record(ctx context.Context, ms ...Measurement) { + if len(ms) == 0 { + return + } + var record bool + for _, m := range ms { + if (m != Measurement{}) { + record = true + break + } + } + if !record { + return + } if internal.DefaultRecorder != nil { internal.DefaultRecorder(tag.FromContext(ctx), ms) } diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 66fda6023..3ae6d02bd 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -15,73 +15,98 @@ package view -// Aggregation represents a data aggregation method. There are several -// aggregation methods made available in the package such as -// CountAggregation, SumAggregation, MeanAggregation and -// DistributionAggregation. -type Aggregation interface { - isAggregation() bool - newData() AggregationData +//go:generate stringer -type AggType + +// AggType represents the type of aggregation function used on a View. +type AggType int + +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeMean // the mean aggregation, see Mean. + AggTypeDistribution // the distribution aggregation, see Distribution. +) + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, Mean, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. 
+ + newData func() AggregationData } -// CountAggregation indicates that data collected and aggregated +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return newCountData(0) + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return newSumData(0) + }, + } + aggMean = &Aggregation{ + Type: AggTypeMean, + newData: func() AggregationData { + return newMeanData(0, 0) + }, + } +) + +// Count indicates that data collected and aggregated // with this method will be turned into a count value. // For example, total number of accepted requests can be -// aggregated by using CountAggregation. -type CountAggregation struct{} - -func (a CountAggregation) isAggregation() bool { return true } - -func (a CountAggregation) newData() AggregationData { - return newCountData(0) +// aggregated by using Count. +func Count() *Aggregation { + return aggCount } -// SumAggregation indicates that data collected and aggregated +// Sum indicates that data collected and aggregated // with this method will be summed up. // For example, accumulated request bytes can be aggregated by using -// SumAggregation. -type SumAggregation struct{} - -func (a SumAggregation) isAggregation() bool { return true } - -func (a SumAggregation) newData() AggregationData { - return newSumData(0) +// Sum. +func Sum() *Aggregation { + return aggSum } -// MeanAggregation indicates that collect and aggregate data and maintain +// Mean indicates that collect and aggregate data and maintain // the mean value. // For example, average latency in milliseconds can be aggregated by using -// MeanAggregation. -type MeanAggregation struct{} - -func (a MeanAggregation) isAggregation() bool { return true } - -func (a MeanAggregation) newData() AggregationData { - return newMeanData(0, 0) +// Mean, although in most cases it is preferable to use a Distribution. 
+func Mean() *Aggregation { + return aggMean } -// DistributionAggregation indicates that the desired aggregation is +// Distribution indicates that the desired aggregation is // a histogram distribution. +// // An distribution aggregation may contain a histogram of the values in the // population. The bucket boundaries for that histogram are described -// by DistributionAggregation slice. This defines length+1 buckets. +// by the bounds. This defines len(bounds)+1 buckets. // -// If length >= 2 then the boundaries for bucket index i are: +// If len(bounds) >= 2 then the boundaries for bucket index i are: // // [-infinity, bounds[i]) for i = 0 // [bounds[i-1], bounds[i]) for 0 < i < length // [bounds[i-1], +infinity) for i = length // -// If length is 0 then there is no histogram associated with the +// If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries // (-infinity, +infinity). // -// If length is 1 then there is no finite buckets, and that single +// If len(bounds) is 1 then there is no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. 
-type DistributionAggregation []float64 - -func (a DistributionAggregation) isAggregation() bool { return true } - -func (a DistributionAggregation) newData() AggregationData { - return newDistributionData([]float64(a)) +func Distribution(bounds ...float64) *Aggregation { + return &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + newData: func() AggregationData { + return newDistributionData(bounds) + }, + } } diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index bf03a96fd..8d74bc47f 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -31,7 +31,7 @@ type AggregationData interface { const epsilon = 1e-9 -// CountData is the aggregated data for a CountAggregation. +// CountData is the aggregated data for the Count aggregation. // A count aggregation processes data and counts the recordings. // // Most users won't directly access count data. @@ -61,7 +61,7 @@ func (a *CountData) equal(other AggregationData) bool { return int64(*a) == int64(*a2) } -// SumData is the aggregated data for a SumAggregation. +// SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // // Most users won't directly access sum data. @@ -90,7 +90,7 @@ func (a *SumData) equal(other AggregationData) bool { return math.Pow(float64(*a)-float64(*a2), 2) < epsilon } -// MeanData is the aggregated data for a MeanAggregation. +// MeanData is the aggregated data for the Mean aggregation. // A mean aggregation processes data and maintains the mean value. // // Most users won't directly access mean data. @@ -132,8 +132,8 @@ func (a *MeanData) equal(other AggregationData) bool { return a.Count == a2.Count && math.Pow(a.Mean-a2.Mean, 2) < epsilon } -// DistributionData is the aggregated data for an -// DistributionAggregation. 
+// DistributionData is the aggregated data for the +// Distribution aggregation. // // Most users won't directly access distribution data. type DistributionData struct { diff --git a/vendor/go.opencensus.io/stats/view/aggtype_string.go b/vendor/go.opencensus.io/stats/view/aggtype_string.go new file mode 100644 index 000000000..b6ad7411b --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type AggType"; DO NOT EDIT. + +package view + +import "strconv" + +const _AggType_name = "AggTypeNoneAggTypeCountAggTypeSumAggTypeMeanAggTypeDistribution" + +var _AggType_index = [...]uint8{0, 11, 23, 33, 44, 63} + +func (i AggType) String() string { + if i < 0 || i >= AggType(len(_AggType_index)-1) { + return "AggType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AggType_name[_AggType_index[i]:_AggType_index[i+1]] +} diff --git a/vendor/go.opencensus.io/stats/view/benchmark_test.go b/vendor/go.opencensus.io/stats/view/benchmark_test.go index d9a7d3e76..939506a75 100644 --- a/vendor/go.opencensus.io/stats/view/benchmark_test.go +++ b/vendor/go.opencensus.io/stats/view/benchmark_test.go @@ -36,7 +36,7 @@ var ( k8, _ = tag.NewKey("k8") view = &View{ Measure: m, - Aggregation: DistributionAggregation{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Aggregation: Distribution(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), TagKeys: []tag.Key{k1, k2}, } ) diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ca74952d1..863a5b62a 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -28,7 +28,7 @@ type collector struct { signatures map[string]AggregationData // Aggregation is the description of the aggregation to perform for this // view. 
- a Aggregation + a *Aggregation } func (c *collector) addSample(s string, v float64) { diff --git a/vendor/go.opencensus.io/stats/view/example_test.go b/vendor/go.opencensus.io/stats/view/example_test.go index 6e6862a22..d6ab6005f 100644 --- a/vendor/go.opencensus.io/stats/view/example_test.go +++ b/vendor/go.opencensus.io/stats/view/example_test.go @@ -24,13 +24,12 @@ import ( func Example() { m, _ := stats.Int64("my.org/measure/openconns", "open connections", "") - err := view.Subscribe(&view.View{ + if err := view.Subscribe(&view.View{ Name: "my.org/views/openconns", Description: "open connections", Measure: m, - Aggregation: view.DistributionAggregation{0, 1000, 2000}, - }) - if err != nil { + Aggregation: view.Distribution(0, 1000, 2000), + }); err != nil { log.Fatal(err) } diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 97a17816c..d289b590f 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -43,11 +43,11 @@ type View struct { Measure stats.Measure // Aggregation is the aggregation function tp apply to the set of Measurements. - Aggregation Aggregation + Aggregation *Aggregation } // Deprecated: Use &View{}. 
-func New(name, description string, keys []tag.Key, measure stats.Measure, agg Aggregation) (*View, error) { +func New(name, description string, keys []tag.Key, measure stats.Measure, agg *Aggregation) (*View, error) { if measure == nil { panic("measure may not be nil") } @@ -83,29 +83,26 @@ func (v *View) same(other *View) bool { // canonicalized returns a validated View canonicalized by setting explicit // defaults for Name and Description and sorting the TagKeys -func (v *View) canonicalized() (*View, error) { +func (v *View) canonicalize() error { if v.Measure == nil { - return nil, fmt.Errorf("cannot subscribe view %q: measure not set", v.Name) + return fmt.Errorf("cannot subscribe view %q: measure not set", v.Name) } if v.Aggregation == nil { - return nil, fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name) + return fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name) } - vc := *v - if vc.Name == "" { - vc.Name = vc.Measure.Name() + if v.Name == "" { + v.Name = v.Measure.Name() } - if vc.Description == "" { - vc.Description = vc.Measure.Description() + if v.Description == "" { + v.Description = v.Measure.Description() } - if err := checkViewName(vc.Name); err != nil { - return nil, err + if err := checkViewName(v.Name); err != nil { + return err } - vc.TagKeys = make([]tag.Key, len(v.TagKeys)) - copy(vc.TagKeys, v.TagKeys) - sort.Slice(vc.TagKeys, func(i, j int) bool { - return vc.TagKeys[i].Name() < vc.TagKeys[j].Name() + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() }) - return &vc, nil + return nil } // viewInternal is the internal representation of a View. 
@@ -116,12 +113,8 @@ type viewInternal struct { } func newViewInternal(v *View) (*viewInternal, error) { - vc, err := v.canonicalized() - if err != nil { - return nil, err - } return &viewInternal{ - view: vc, + view: v, collector: &collector{make(map[string]AggregationData), v.Aggregation}, }, nil } diff --git a/vendor/go.opencensus.io/stats/view/view_test.go b/vendor/go.opencensus.io/stats/view/view_test.go index c82310b94..390a338ac 100644 --- a/vendor/go.opencensus.io/stats/view/view_test.go +++ b/vendor/go.opencensus.io/stats/view/view_test.go @@ -27,7 +27,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { k1, _ := tag.NewKey("k1") k2, _ := tag.NewKey("k2") k3, _ := tag.NewKey("k3") - agg1 := DistributionAggregation{2} + agg1 := Distribution(2) m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitNone) view1 := &View{ TagKeys: []tag.Key{k1, k2}, @@ -65,7 +65,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - 2, 1, 5, 3, 8, []int64{1, 1}, agg1, + 2, 1, 5, 3, 8, []int64{1, 1}, []float64{2}, }, }, }, @@ -80,13 +80,13 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - 1, 1, 1, 1, 0, []int64{1, 0}, agg1, + 1, 1, 1, 1, 0, []int64{1, 0}, []float64{2}, }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ - 1, 5, 5, 5, 0, []int64{0, 1}, agg1, + 1, 5, 5, 5, 0, []int64{0, 1}, []float64{2}, }, }, }, @@ -104,25 +104,25 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - 2, 1, 5, 3, 8, []int64{1, 1}, agg1, + 2, 1, 5, 3, 8, []int64{1, 1}, []float64{2}, }, }, { []tag.Tag{{Key: k1, Value: "v1 other"}}, &DistributionData{ - 1, 1, 1, 1, 0, []int64{1, 0}, agg1, + 1, 1, 1, 1, 0, []int64{1, 0}, []float64{2}, }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ - 1, 5, 
5, 5, 0, []int64{0, 1}, agg1, + 1, 5, 5, 5, 0, []int64{0, 1}, []float64{2}, }, }, { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &DistributionData{ - 1, 5, 5, 5, 0, []int64{0, 1}, agg1, + 1, 5, 5, 5, 0, []int64{0, 1}, []float64{2}, }, }, }, @@ -142,19 +142,19 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}}, &DistributionData{ - 2, 1, 5, 3, 8, []int64{1, 1}, agg1, + 2, 1, 5, 3, 8, []int64{1, 1}, []float64{2}, }, }, { []tag.Tag{{Key: k1, Value: "v1 is another very long value key"}}, &DistributionData{ - 1, 1, 1, 1, 0, []int64{1, 0}, agg1, + 1, 1, 1, 1, 0, []int64{1, 0}, []float64{2}, }, }, { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}, {Key: k2, Value: "v2 is a very long value key"}}, &DistributionData{ - 4, 1, 5, 3, 2.66666666666667 * 3, []int64{1, 3}, agg1, + 4, 1, 5, 3, 2.66666666666667 * 3, []int64{1, 3}, []float64{2}, }, }, }, @@ -198,7 +198,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { k2, _ := tag.NewKey("k2") k3, _ := tag.NewKey("k3") m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitNone) - view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: SumAggregation{}}) + view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Sum()}) if err != nil { t.Fatal(err) } @@ -313,21 +313,21 @@ func TestCanonicalize(t *testing.T) { k1, _ := tag.NewKey("k1") k2, _ := tag.NewKey("k2") m, _ := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitNone) - v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: MeanAggregation{}} - vc, err := v.canonicalized() + v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Mean()} + err := v.canonicalize() if err != nil { t.Fatal(err) } - if got, want := vc.Name, "TestCanonicalize/m1"; got != want { + if got, want := v.Name, "TestCanonicalize/m1"; got != want { t.Errorf("vc.Name = %q; want 
%q", got, want) } - if got, want := vc.Description, "desc desc"; got != want { + if got, want := v.Description, "desc desc"; got != want { t.Errorf("vc.Description = %q; want %q", got, want) } - if got, want := len(vc.TagKeys), 2; got != want { + if got, want := len(v.TagKeys), 2; got != want { t.Errorf("len(vc.TagKeys) = %d; want %d", got, want) } - if got, want := vc.TagKeys[0].Name(), "k1"; got != want { + if got, want := v.TagKeys[0].Name(), "k1"; got != want { t.Errorf("vc.TagKeys[0].Name() = %q; want %q", got, want) } } @@ -337,7 +337,7 @@ func Test_View_MeasureFloat64_AggregationMean(t *testing.T) { k2, _ := tag.NewKey("k2") k3, _ := tag.NewKey("k3") m, _ := stats.Int64("Test_View_MeasureFloat64_AggregationMean/m1", "", stats.UnitNone) - viewDesc := &View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: MeanAggregation{}} + viewDesc := &View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Mean()} view, err := newViewInternal(viewDesc) if err != nil { t.Fatal(err) @@ -463,7 +463,7 @@ func TestViewSortedKeys(t *testing.T) { Description: "desc sort_keys", TagKeys: ks, Measure: m, - Aggregation: &MeanAggregation{}, + Aggregation: Mean(), }) // Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view v := Find("sort_keys") diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 29f7e1dfd..2b4f6bee0 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -79,6 +79,11 @@ func (v *View) Subscribe() error { // Subscribe begins collecting data for the given views. // Once a view is subscribed, it reports data to the registered exporters. func Subscribe(views ...*View) error { + for _, v := range views { + if err := v.canonicalize(); err != nil { + return err + } + } req := &subscribeToViewReq{ views: views, err: make(chan error), @@ -89,6 +94,8 @@ func Subscribe(views ...*View) error { // Unsubscribe the given views. 
Data will not longer be exported for these views // after Unsubscribe returns. +// It is not necessary to unsubscribe from views you expect to collect for the +// duration of your program execution. func Unsubscribe(views ...*View) { names := make([]string, len(views)) for i := range views { diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 4e7c16089..8c6cde623 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -22,6 +22,7 @@ import ( "time" "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -57,6 +58,7 @@ func (cmd *subscribeToViewReq) handleCommand(w *worker) { errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) continue } + internal.SubscriptionReporter(view.Measure.Name()) vi.subscribe() } if len(errstr) > 0 { @@ -135,6 +137,9 @@ type recordReq struct { func (cmd *recordReq) handleCommand(w *worker) { for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not subscribed + continue + } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { v.addSample(cmd.tm, m.Value()) diff --git a/vendor/go.opencensus.io/stats/view/worker_test.go b/vendor/go.opencensus.io/stats/view/worker_test.go index 9ea8bcb89..d81c4337b 100644 --- a/vendor/go.opencensus.io/stats/view/worker_test.go +++ b/vendor/go.opencensus.io/stats/view/worker_test.go @@ -121,18 +121,18 @@ func Test_Worker_ViewSubscription(t *testing.T) { "v1ID": { Name: "VF1", Measure: mf1, - Aggregation: &CountAggregation{}, + Aggregation: Count(), }, "v1SameNameID": { Name: "VF1", Description: "desc duplicate name VF1", Measure: mf1, - Aggregation: &SumAggregation{}, + Aggregation: Sum(), }, "v2ID": { Name: "VF2", Measure: mf2, - Aggregation: &CountAggregation{}, + Aggregation: Count(), }, "vNilID": nil, } @@ -167,8 +167,8 @@ func Test_Worker_RecordFloat64(t *testing.T) { t.Fatal(err) } - v1 
:= &View{"VF1", "desc VF1", []tag.Key{k1, k2}, m, CountAggregation{}} - v2 := &View{"VF2", "desc VF2", []tag.Key{k1, k2}, m, CountAggregation{}} + v1 := &View{"VF1", "desc VF1", []tag.Key{k1, k2}, m, Count()} + v2 := &View{"VF2", "desc VF2", []tag.Key{k1, k2}, m, Count()} type want struct { v *View @@ -306,12 +306,12 @@ func TestReportUsage(t *testing.T) { }{ { name: "cum", - view: &View{Name: "cum1", Measure: m, Aggregation: CountAggregation{}}, + view: &View{Name: "cum1", Measure: m, Aggregation: Count()}, wantMaxCount: 8, }, { name: "cum2", - view: &View{Name: "cum1", Measure: m, Aggregation: CountAggregation{}}, + view: &View{Name: "cum1", Measure: m, Aggregation: Count()}, wantMaxCount: 8, }, } @@ -378,7 +378,7 @@ func TestWorkerStarttime(t *testing.T) { if err != nil { t.Fatalf("stats.Int64() = %v", err) } - v, _ := New("testview", "", nil, m, CountAggregation{}) + v, _ := New("testview", "", nil, m, Count()) SetReportingPeriod(25 * time.Millisecond) if err := v.Subscribe(); err != nil { diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index e57a05340..3ba7f0a34 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -41,46 +41,26 @@ type Annotation struct { Attributes map[string]interface{} } -// Attribute is an interface for attributes; -// it is implemented by BoolAttribute, IntAttribute, and StringAttribute. -type Attribute interface { - isAttribute() +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} } // BoolAttribute returns a bool-valued attribute. 
func BoolAttribute(key string, value bool) Attribute { - return boolAttribute{key: key, value: value} + return Attribute{key: key, value: value} } -type boolAttribute struct { - key string - value bool -} - -func (b boolAttribute) isAttribute() {} - -type int64Attribute struct { - key string - value int64 -} - -func (i int64Attribute) isAttribute() {} - // Int64Attribute returns an int64-valued attribute. func Int64Attribute(key string, value int64) Attribute { - return int64Attribute{key: key, value: value} + return Attribute{key: key, value: value} } -type stringAttribute struct { - key string - value string -} - -func (s stringAttribute) isAttribute() {} - // StringAttribute returns a string-valued attribute. func StringAttribute(key string, value string) Attribute { - return stringAttribute{key: key, value: value} + return Attribute{key: key, value: value} } // LinkType specifies the relationship between the span that had the link diff --git a/vendor/go.opencensus.io/trace/benchmark_test.go b/vendor/go.opencensus.io/trace/benchmark_test.go index 3d6945789..8c6cc5abb 100644 --- a/vendor/go.opencensus.io/trace/benchmark_test.go +++ b/vendor/go.opencensus.io/trace/benchmark_test.go @@ -20,66 +20,86 @@ import ( ) func BenchmarkStartEndSpan(b *testing.B) { - ctx := context.Background() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, span := StartSpan(ctx, "/foo") - span.End() - } + traceBenchmark(b, func(b *testing.B) { + ctx := context.Background() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, span := StartSpan(ctx, "/foo") + span.End() + } + }) } func BenchmarkSpanWithAnnotations_3(b *testing.B) { - ctx := context.Background() - b.ResetTimer() + traceBenchmark(b, func(b *testing.B) { + ctx := context.Background() + b.ResetTimer() - for i := 0; i < b.N; i++ { - _, span := StartSpan(ctx, "/foo") - span.SetAttributes( - BoolAttribute("key1", false), - StringAttribute("key2", "hello"), - Int64Attribute("key3", 123), - ) - span.End() - } + for i := 0; i < 
b.N; i++ { + _, span := StartSpan(ctx, "/foo") + span.AddAttributes( + BoolAttribute("key1", false), + StringAttribute("key2", "hello"), + Int64Attribute("key3", 123), + ) + span.End() + } + }) } func BenchmarkSpanWithAnnotations_6(b *testing.B) { - ctx := context.Background() - b.ResetTimer() + traceBenchmark(b, func(b *testing.B) { + ctx := context.Background() + b.ResetTimer() - for i := 0; i < b.N; i++ { - _, span := StartSpan(ctx, "/foo") - span.SetAttributes( - BoolAttribute("key1", false), - BoolAttribute("key2", true), - StringAttribute("key3", "hello"), - StringAttribute("key4", "hello"), - Int64Attribute("key5", 123), - Int64Attribute("key6", 456), - ) - span.End() - } + for i := 0; i < b.N; i++ { + _, span := StartSpan(ctx, "/foo") + span.AddAttributes( + BoolAttribute("key1", false), + BoolAttribute("key2", true), + StringAttribute("key3", "hello"), + StringAttribute("key4", "hello"), + Int64Attribute("key5", 123), + Int64Attribute("key6", 456), + ) + span.End() + } + }) } func BenchmarkTraceID_DotString(b *testing.B) { - b.ReportAllocs() - t := TraceID{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F, 0x0F, 0x0E, 0x0E, 0x0B, 0x0D, 0x0A, 0x0E, 0x0D} - want := "0d0e0a0d0b0e0e0f0f0e0e0b0d0a0e0d" - for i := 0; i < b.N; i++ { - if got := t.String(); got != want { - b.Fatalf("got = %q want = %q", got, want) + traceBenchmark(b, func(b *testing.B) { + t := TraceID{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F, 0x0F, 0x0E, 0x0E, 0x0B, 0x0D, 0x0A, 0x0E, 0x0D} + want := "0d0e0a0d0b0e0e0f0f0e0e0b0d0a0e0d" + for i := 0; i < b.N; i++ { + if got := t.String(); got != want { + b.Fatalf("got = %q want = %q", got, want) + } } - } + }) } func BenchmarkSpanID_DotString(b *testing.B) { - b.ReportAllocs() - s := SpanID{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F} - want := "0d0e0a0d0b0e0e0f" - for i := 0; i < b.N; i++ { - if got := s.String(); got != want { - b.Fatalf("got = %q want = %q", got, want) + traceBenchmark(b, func(b *testing.B) { + s := SpanID{0x0D, 0x0E, 0x0A, 
0x0D, 0x0B, 0x0E, 0x0E, 0x0F} + want := "0d0e0a0d0b0e0e0f" + for i := 0; i < b.N; i++ { + if got := s.String(); got != want { + b.Fatalf("got = %q want = %q", got, want) + } } - } + }) +} + +func traceBenchmark(b *testing.B, fn func(*testing.B)) { + b.Run("AlwaysSample", func(b *testing.B) { + b.ReportAllocs() + SetDefaultSampler(AlwaysSample()) + fn(b) + }) + b.Run("NeverSample", func(b *testing.B) { + b.ReportAllocs() + SetDefaultSampler(NeverSample()) + fn(b) + }) } diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go index db5d275a2..086612c9a 100644 --- a/vendor/go.opencensus.io/trace/export.go +++ b/vendor/go.opencensus.io/trace/export.go @@ -58,6 +58,7 @@ func UnregisterExporter(e Exporter) { type SpanData struct { SpanContext ParentSpanID SpanID + SpanKind int Name string StartTime time.Time // The wall clock time of EndTime will be adjusted to always be offset diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index b6ad0c60e..0340b6a83 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -100,6 +100,13 @@ func WithSpan(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + // StartOptions contains options concerning how a span is started. type StartOptions struct { // Sampler to consult for this Span. If provided, it is always consulted. @@ -111,9 +118,11 @@ type StartOptions struct { // when there is a non-remote parent, no new sampling decision will be made: // we will preserve the sampling of the parent. Sampler Sampler -} -// TODO(jbd): Remove start options. + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. 
+ SpanKind int +} // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. @@ -180,6 +189,7 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa span.data = &SpanData{ SpanContext: span.spanContext, StartTime: time.Now(), + SpanKind: o.SpanKind, Name: name, HasRemoteParent: remoteParent, } @@ -213,10 +223,10 @@ func (s *Span) End() { if s.spanContext.IsSampled() { // TODO: consider holding exportersMu for less time. exportersMu.Lock() - defer exportersMu.Unlock() for e := range exporters { e.ExportSpan(sd) } + exportersMu.Unlock() } }) } @@ -255,10 +265,10 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } -// SetAttributes sets attributes in the span. +// AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. -func (s *Span) SetAttributes(attributes ...Attribute) { +func (s *Span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } @@ -273,14 +283,7 @@ func (s *Span) SetAttributes(attributes ...Attribute) { // copyAttributes copies a slice of Attributes into a map. func copyAttributes(m map[string]interface{}, attributes []Attribute) { for _, a := range attributes { - switch a := a.(type) { - case boolAttribute: - m[a.key] = a.value - case int64Attribute: - m[a.key] = a.value - case stringAttribute: - m[a.key] = a.value - } + m[a.key] = a.value } } diff --git a/vendor/go.opencensus.io/trace/trace_test.go b/vendor/go.opencensus.io/trace/trace_test.go index 957ab7acb..862b2c939 100644 --- a/vendor/go.opencensus.io/trace/trace_test.go +++ b/vendor/go.opencensus.io/trace/trace_test.go @@ -236,14 +236,15 @@ func TestStartSpanWithRemoteParent(t *testing.T) { } // startSpan returns a context with a new Span that is recording events and will be exported. 
-func startSpan() *Span { +func startSpan(o StartOptions) *Span { return NewSpanWithRemoteParent("span0", SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 1, }, - StartOptions{}) + o, + ) } type testExporter struct { @@ -295,9 +296,78 @@ func checkTime(x *time.Time) bool { return true } +func TestSpanKind(t *testing.T) { + tests := []struct { + name string + startOptions StartOptions + want *SpanData + }{ + { + name: "zero StartOptions", + startOptions: StartOptions{}, + want: &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + SpanKind: SpanKindUnspecified, + HasRemoteParent: true, + }, + }, + { + name: "client span", + startOptions: StartOptions{ + SpanKind: SpanKindClient, + }, + want: &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + SpanKind: SpanKindClient, + HasRemoteParent: true, + }, + }, + { + name: "server span", + startOptions: StartOptions{ + SpanKind: SpanKindServer, + }, + want: &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + SpanKind: SpanKindServer, + HasRemoteParent: true, + }, + }, + } + + for _, tt := range tests { + span := startSpan(tt.startOptions) + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("exporting span: got %#v want %#v", got, tt.want) + } + } +} + func TestSetSpanAttributes(t *testing.T) { - span := startSpan() - span.SetAttributes(StringAttribute("key1", "value1")) + span := startSpan(StartOptions{}) + span.AddAttributes(StringAttribute("key1", "value1")) got, err := endSpan(span) if err != nil { t.Fatal(err) @@ -320,7 +390,7 @@ func TestSetSpanAttributes(t *testing.T) { } func TestAnnotations(t *testing.T) { - span := startSpan() + span := startSpan(StartOptions{}) 
span.Annotatef([]Attribute{StringAttribute("key1", "value1")}, "%f", 1.5) span.Annotate([]Attribute{StringAttribute("key2", "value2")}, "Annotate") got, err := endSpan(span) @@ -354,7 +424,7 @@ func TestAnnotations(t *testing.T) { } func TestMessageEvents(t *testing.T) { - span := startSpan() + span := startSpan(StartOptions{}) span.AddMessageReceiveEvent(3, 400, 300) span.AddMessageSendEvent(1, 200, 100) got, err := endSpan(span) @@ -388,7 +458,7 @@ func TestMessageEvents(t *testing.T) { } func TestSetSpanStatus(t *testing.T) { - span := startSpan() + span := startSpan(StartOptions{}) span.SetStatus(Status{Code: int32(1), Message: "request failed"}) got, err := endSpan(span) if err != nil { @@ -412,7 +482,7 @@ func TestSetSpanStatus(t *testing.T) { } func TestAddLink(t *testing.T) { - span := startSpan() + span := startSpan(StartOptions{}) span.AddLink(Link{ TraceID: tid, SpanID: sid, @@ -450,7 +520,7 @@ func TestUnregisterExporter(t *testing.T) { RegisterExporter(&te) UnregisterExporter(&te) - ctx := startSpan() + ctx := startSpan(StartOptions{}) endSpan(ctx) if len(te.spans) != 0 { t.Error("unregistered Exporter was called") diff --git a/vendor/go.opencensus.io/zpages/rpcz.go b/vendor/go.opencensus.io/zpages/rpcz.go index d3b3af795..663b9b308 100644 --- a/vendor/go.opencensus.io/zpages/rpcz.go +++ b/vendor/go.opencensus.io/zpages/rpcz.go @@ -26,7 +26,6 @@ import ( "text/tabwriter" "time" - "go.opencensus.io/internal" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" ) @@ -62,8 +61,7 @@ func init() { for v := range viewType { views = append(views, v) } - err := view.Subscribe(views...) 
- if err != nil { + if err := view.Subscribe(views...); err != nil { log.Printf("error subscribing to views: %v", err) } view.RegisterExporter(snapExporter{}) @@ -143,60 +141,6 @@ type headerData struct { Title string } -type summaryPageData struct { - Header []string - LatencyBucketNames []string - Links bool - TracesEndpoint string - Rows []summaryPageRow -} - -type summaryPageRow struct { - Name string - Active int - Latency []int - Errors int -} - -func (s *summaryPageData) Len() int { return len(s.Rows) } -func (s *summaryPageData) Less(i, j int) bool { return s.Rows[i].Name < s.Rows[j].Name } -func (s *summaryPageData) Swap(i, j int) { s.Rows[i], s.Rows[j] = s.Rows[j], s.Rows[i] } - -func getSummaryPageData() summaryPageData { - data := summaryPageData{ - Links: true, - TracesEndpoint: "/tracez", - } - internalTrace := internal.Trace.(interface { - ReportSpansPerMethod() map[string]internal.PerMethodSummary - }) - for name, s := range internalTrace.ReportSpansPerMethod() { - if len(data.Header) == 0 { - data.Header = []string{"Name", "Active"} - for _, b := range s.LatencyBuckets { - l := b.MinLatency - s := fmt.Sprintf(">%v", l) - if l == 100*time.Second { - s = ">100s" - } - data.Header = append(data.Header, s) - data.LatencyBucketNames = append(data.LatencyBucketNames, s) - } - data.Header = append(data.Header, "Errors") - } - row := summaryPageRow{Name: name, Active: s.Active} - for _, l := range s.LatencyBuckets { - row.Latency = append(row.Latency, l.Size) - } - for _, e := range s.ErrorBuckets { - row.Errors += e.Size - } - data.Rows = append(data.Rows, row) - } - sort.Sort(&data) - return data -} - // statsPage aggregates stats on the page for 'sent' and 'received' categories type statsPage struct { StatGroups []*statGroup diff --git a/vendor/go.opencensus.io/zpages/rpcz_test.go b/vendor/go.opencensus.io/zpages/rpcz_test.go new file mode 100644 index 000000000..9764f6fb3 --- /dev/null +++ b/vendor/go.opencensus.io/zpages/rpcz_test.go @@ -0,0 +1,55 @@ 
+// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package zpages + +import ( + "context" + "testing" + "time" + + "go.opencensus.io/internal/testpb" + "go.opencensus.io/stats/view" +) + +func TestRpcz(t *testing.T) { + client, cleanup := testpb.NewTestClient(t) + defer cleanup() + + _, err := client.Single(context.Background(), &testpb.FooRequest{}) + if err != nil { + t.Fatal(err) + } + + view.SetReportingPeriod(time.Millisecond) + time.Sleep(2 * time.Millisecond) + view.SetReportingPeriod(time.Second) + + mu.Lock() + defer mu.Unlock() + + if len(snaps) == 0 { + t.Fatal("Expected len(snaps) > 0") + } + + snapshot, ok := snaps[methodKey{"testpb.Foo/Single", false}] + if !ok { + t.Fatal("Expected method stats not recorded") + } + + if got, want := snapshot.CountTotal, 1; got != want { + t.Errorf("snapshot.CountTotal = %d; want %d", got, want) + } +} diff --git a/vendor/go.opencensus.io/zpages/tracez.go b/vendor/go.opencensus.io/zpages/tracez.go index 6469632f1..e3ff2c4ab 100644 --- a/vendor/go.opencensus.io/zpages/tracez.go +++ b/vendor/go.opencensus.io/zpages/tracez.go @@ -392,3 +392,55 @@ func writeTextTraces(w io.Writer, data traceData) { } tw.Flush() } + +type summaryPageData struct { + Header []string + LatencyBucketNames []string + Links bool + TracesEndpoint string + Rows []summaryPageRow +} + +type summaryPageRow struct { + Name string + Active int + Latency []int + Errors int +} + +func 
getSummaryPageData() summaryPageData { + data := summaryPageData{ + Links: true, + TracesEndpoint: "tracez", + } + internalTrace := internal.Trace.(interface { + ReportSpansPerMethod() map[string]internal.PerMethodSummary + }) + for name, s := range internalTrace.ReportSpansPerMethod() { + if len(data.Header) == 0 { + data.Header = []string{"Name", "Active"} + for _, b := range s.LatencyBuckets { + l := b.MinLatency + s := fmt.Sprintf(">%v", l) + if l == 100*time.Second { + s = ">100s" + } + data.Header = append(data.Header, s) + data.LatencyBucketNames = append(data.LatencyBucketNames, s) + } + data.Header = append(data.Header, "Errors") + } + row := summaryPageRow{Name: name, Active: s.Active} + for _, l := range s.LatencyBuckets { + row.Latency = append(row.Latency, l.Size) + } + for _, e := range s.ErrorBuckets { + row.Errors += e.Size + } + data.Rows = append(data.Rows, row) + } + sort.Slice(data.Rows, func(i, j int) bool { + return data.Rows[i].Name < data.Rows[j].Name + }) + return data +} diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md index 88dff59bc..d0485e887 100644 --- a/vendor/golang.org/x/crypto/CONTRIBUTING.md +++ b/vendor/golang.org/x/crypto/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? 
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. @@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal.go b/vendor/golang.org/x/crypto/acme/autocert/renewal.go index 2a3a0a706..3fa4d61a2 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/renewal.go +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal.go @@ -71,12 +71,21 @@ func (dr *domainRenewal) renew() { testDidRenewLoop(next, err) } +// updateState locks and replaces the relevant Manager.state item with the given +// state. It additionally updates dr.key with the given state's key. +func (dr *domainRenewal) updateState(state *certState) { + dr.m.stateMu.Lock() + defer dr.m.stateMu.Unlock() + dr.key = state.key + dr.m.state[dr.domain] = state +} + // do is similar to Manager.createCert but it doesn't lock a Manager.state item. // Instead, it requests a new certificate independently and, upon success, // replaces dr.m.state item with a new one and updates cache for the given domain. // -// It may return immediately if the expiration date of the currently cached cert -// is far enough in the future. +// It may lock and update the Manager.state if the expiration date of the currently +// cached cert is far enough in the future. // // The returned value is a time interval after which the renewal should occur again. 
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { @@ -85,7 +94,16 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { if tlscert, err := dr.m.cacheGet(ctx, dr.domain); err == nil { next := dr.next(tlscert.Leaf.NotAfter) if next > dr.m.renewBefore()+renewJitter { - return next, nil + signer, ok := tlscert.PrivateKey.(crypto.Signer) + if ok { + state := &certState{ + key: signer, + cert: tlscert.Certificate, + leaf: tlscert.Leaf, + } + dr.updateState(state) + return next, nil + } } } @@ -105,10 +123,7 @@ func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) { if err := dr.m.cachePut(ctx, dr.domain, tlscert); err != nil { return 0, err } - dr.m.stateMu.Lock() - defer dr.m.stateMu.Unlock() - // m.state is guaranteed to be non-nil at this point - dr.m.state[dr.domain] = state + dr.updateState(state) return dr.next(leaf.NotAfter), nil } diff --git a/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go b/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go index 11d40ff5d..6e88672bd 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go +++ b/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go @@ -189,3 +189,149 @@ func TestRenewFromCache(t *testing.T) { case <-done: } } + +func TestRenewFromCacheAlreadyRenewed(t *testing.T) { + const domain = "example.org" + + // use EC key to run faster on 386 + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + man := &Manager{ + Prompt: AcceptTOS, + Cache: newMemCache(), + RenewBefore: 24 * time.Hour, + Client: &acme.Client{ + Key: key, + DirectoryURL: "invalid", + }, + } + defer man.stopRenew() + + // cache a recently renewed cert with a different private key + newKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + now := time.Now() + newCert, err := dateDummyCert(newKey.Public(), now.Add(-2*time.Hour), now.Add(time.Hour*24*90), domain) + if err 
!= nil { + t.Fatal(err) + } + newLeaf, err := validCert(domain, [][]byte{newCert}, newKey) + if err != nil { + t.Fatal(err) + } + newTLSCert := &tls.Certificate{PrivateKey: newKey, Certificate: [][]byte{newCert}, Leaf: newLeaf} + if err := man.cachePut(context.Background(), domain, newTLSCert); err != nil { + t.Fatal(err) + } + + // set internal state to an almost expired cert + oldCert, err := dateDummyCert(key.Public(), now.Add(-2*time.Hour), now.Add(time.Minute), domain) + if err != nil { + t.Fatal(err) + } + oldLeaf, err := validCert(domain, [][]byte{oldCert}, key) + if err != nil { + t.Fatal(err) + } + man.stateMu.Lock() + if man.state == nil { + man.state = make(map[string]*certState) + } + s := &certState{ + key: key, + cert: [][]byte{oldCert}, + leaf: oldLeaf, + } + man.state[domain] = s + man.stateMu.Unlock() + + // veriy the renewal accepted the newer cached cert + defer func() { + testDidRenewLoop = func(next time.Duration, err error) {} + }() + done := make(chan struct{}) + testDidRenewLoop = func(next time.Duration, err error) { + defer close(done) + if err != nil { + t.Errorf("testDidRenewLoop: %v", err) + } + // Next should be about 90 days + // Previous expiration was within 1 min. 
+ future := 88 * 24 * time.Hour + if next < future { + t.Errorf("testDidRenewLoop: next = %v; want >= %v", next, future) + } + + // ensure the cached cert was not modified + tlscert, err := man.cacheGet(context.Background(), domain) + if err != nil { + t.Fatalf("man.cacheGet: %v", err) + } + if !tlscert.Leaf.NotAfter.Equal(newLeaf.NotAfter) { + t.Errorf("cache leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, newLeaf.NotAfter) + } + + // verify the old cert is also replaced in memory + man.stateMu.Lock() + defer man.stateMu.Unlock() + s := man.state[domain] + if s == nil { + t.Fatalf("m.state[%q] is nil", domain) + } + stateKey := s.key.Public().(*ecdsa.PublicKey) + if stateKey.X.Cmp(newKey.X) != 0 || stateKey.Y.Cmp(newKey.Y) != 0 { + t.Fatalf("state key was not updated from cache x: %v y: %v; want x: %v y: %v", stateKey.X, stateKey.Y, newKey.X, newKey.Y) + } + tlscert, err = s.tlscert() + if err != nil { + t.Fatalf("s.tlscert: %v", err) + } + if !tlscert.Leaf.NotAfter.Equal(newLeaf.NotAfter) { + t.Errorf("state leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, newLeaf.NotAfter) + } + + // verify the private key is replaced in the renewal state + r := man.renewal[domain] + if r == nil { + t.Fatalf("m.renewal[%q] is nil", domain) + } + renewalKey := r.key.Public().(*ecdsa.PublicKey) + if renewalKey.X.Cmp(newKey.X) != 0 || renewalKey.Y.Cmp(newKey.Y) != 0 { + t.Fatalf("renewal private key was not updated from cache x: %v y: %v; want x: %v y: %v", renewalKey.X, renewalKey.Y, newKey.X, newKey.Y) + } + + } + + // assert the expiring cert is returned from state + hello := &tls.ClientHelloInfo{ServerName: domain} + tlscert, err := man.GetCertificate(hello) + if err != nil { + t.Fatal(err) + } + if !oldLeaf.NotAfter.Equal(tlscert.Leaf.NotAfter) { + t.Errorf("state leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, oldLeaf.NotAfter) + } + + // trigger renew + go man.renew(domain, s.key, s.leaf.NotAfter) + + // wait for renew loop + select { + case 
<-time.After(10 * time.Second): + t.Fatal("renew took too long to occur") + case <-done: + // assert the new cert is returned from state after renew + hello := &tls.ClientHelloInfo{ServerName: domain} + tlscert, err := man.GetCertificate(hello) + if err != nil { + t.Fatal(err) + } + if !newTLSCert.Leaf.NotAfter.Equal(tlscert.Leaf.NotAfter) { + t.Errorf("state leaf.NotAfter = %v; want == %v", tlscert.Leaf.NotAfter, newTLSCert.Leaf.NotAfter) + } + } +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go index 4f26b49b6..a57771a1e 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -171,9 +171,16 @@ func Verify(publicKey PublicKey, message, sig []byte) bool { edwards25519.ScReduce(&hReduced, &digest) var R edwards25519.ProjectiveGroupElement - var b [32]byte - copy(b[:], sig[32:]) - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. + if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) var checkR [32]byte R.ToBytes(&checkR) diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_test.go b/vendor/golang.org/x/crypto/ed25519/ed25519_test.go index e272f8a55..5f946e996 100644 --- a/vendor/golang.org/x/crypto/ed25519/ed25519_test.go +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_test.go @@ -146,6 +146,30 @@ func TestGolden(t *testing.T) { } } +func TestMalleability(t *testing.T) { + // https://tools.ietf.org/html/rfc8032#section-5.1.7 adds an additional test + // that s be in [0, order). This prevents someone from adding a multiple of + // order to s and obtaining a second valid signature for the same message. 
+ msg := []byte{0x54, 0x65, 0x73, 0x74} + sig := []byte{ + 0x7c, 0x38, 0xe0, 0x26, 0xf2, 0x9e, 0x14, 0xaa, 0xbd, 0x05, 0x9a, + 0x0f, 0x2d, 0xb8, 0xb0, 0xcd, 0x78, 0x30, 0x40, 0x60, 0x9a, 0x8b, + 0xe6, 0x84, 0xdb, 0x12, 0xf8, 0x2a, 0x27, 0x77, 0x4a, 0xb0, 0x67, + 0x65, 0x4b, 0xce, 0x38, 0x32, 0xc2, 0xd7, 0x6f, 0x8f, 0x6f, 0x5d, + 0xaf, 0xc0, 0x8d, 0x93, 0x39, 0xd4, 0xee, 0xf6, 0x76, 0x57, 0x33, + 0x36, 0xa5, 0xc5, 0x1e, 0xb6, 0xf9, 0x46, 0xb3, 0x1d, + } + publicKey := []byte{ + 0x7d, 0x4d, 0x0e, 0x7f, 0x61, 0x53, 0xa6, 0x9b, 0x62, 0x42, 0xb5, + 0x22, 0xab, 0xbe, 0xe6, 0x85, 0xfd, 0xa4, 0x42, 0x0f, 0x88, 0x34, + 0xb1, 0x08, 0xc3, 0xbd, 0xae, 0x36, 0x9e, 0xf5, 0x49, 0xfa, + } + + if Verify(publicKey, msg, sig) { + t.Fatal("non-canonical signature accepted") + } +} + func BenchmarkKeyGeneration(b *testing.B) { var zero zeroReader for i := 0; i < b.N; i++ { diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go index 5f8b99478..fd03c252a 100644 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -4,6 +4,8 @@ package edwards25519 +import "encoding/binary" + // This code is a port of the public domain, “ref10” implementation of ed25519 // from SUPERCOP. @@ -1769,3 +1771,23 @@ func ScReduce(out *[32]byte, s *[64]byte) { out[30] = byte(s11 >> 9) out[31] = byte(s11 >> 17) } + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. 
+func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/golang.org/x/crypto/nacl/sign/sign.go b/vendor/golang.org/x/crypto/nacl/sign/sign.go new file mode 100644 index 000000000..a9ac0a771 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/sign/sign.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sign signs small messages using public-key cryptography. +// +// Sign uses Ed25519 to sign messages. The length of messages is not hidden. +// Messages should be small because: +// 1. The whole message needs to be held in memory to be processed. +// 2. Using large messages pressures implementations on small machines to process +// plaintext without verifying the signature. This is very dangerous, and this API +// discourages it, but a protocol that uses excessive message sizes might present +// some implementations with no other choice. +// 3. Performance may be improved by working with messages that fit into data caches. +// Thus large amounts of data should be chunked so that each message is small. +// +// This package is not interoperable with the current release of NaCl +// (https://nacl.cr.yp.to/sign.html), which does not support Ed25519 yet. However, +// it is compatible with the NaCl fork libsodium (https://www.libsodium.org), as well +// as TweetNaCl (https://tweetnacl.cr.yp.to/). +package sign + +import ( + "io" + + "golang.org/x/crypto/ed25519" +) + +// Overhead is the number of bytes of overhead when signing a message. +const Overhead = 64 + +// GenerateKey generates a new public/private key pair suitable for use with +// Sign and Open. 
+func GenerateKey(rand io.Reader) (publicKey *[32]byte, privateKey *[64]byte, err error) { + pub, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + publicKey, privateKey = new([32]byte), new([64]byte) + copy((*publicKey)[:], pub) + copy((*privateKey)[:], priv) + return publicKey, privateKey, nil +} + +// Sign appends a signed copy of message to out, which will be Overhead bytes +// longer than the original and must not overlap it. +func Sign(out, message []byte, privateKey *[64]byte) []byte { + sig := ed25519.Sign(ed25519.PrivateKey((*privateKey)[:]), message) + ret, out := sliceForAppend(out, Overhead+len(message)) + copy(out, sig) + copy(out[Overhead:], message) + return ret +} + +// Open verifies a signed message produced by Sign and appends the message to +// out, which must not overlap the signed message. The output will be Overhead +// bytes smaller than the signed message. +func Open(out, signedMessage []byte, publicKey *[32]byte) ([]byte, bool) { + if len(signedMessage) < Overhead { + return nil, false + } + if !ed25519.Verify(ed25519.PublicKey((*publicKey)[:]), signedMessage[Overhead:], signedMessage[:Overhead]) { + return nil, false + } + ret, out := sliceForAppend(out, len(signedMessage)-Overhead) + copy(out, signedMessage[Overhead:]) + return ret, true +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. 
+func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/golang.org/x/crypto/nacl/sign/sign_test.go b/vendor/golang.org/x/crypto/nacl/sign/sign_test.go new file mode 100644 index 000000000..0a6439a62 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/sign/sign_test.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sign + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "testing" +) + +var testSignedMessage, _ = hex.DecodeString("26a0a47f733d02ddb74589b6cbd6f64a7dab1947db79395a1a9e00e4c902c0f185b119897b89b248d16bab4ea781b5a3798d25c2984aec833dddab57e0891e0d68656c6c6f20776f726c64") +var testMessage = testSignedMessage[Overhead:] +var testPublicKey [32]byte +var testPrivateKey = [64]byte{ + 0x98, 0x3c, 0x6a, 0xa6, 0x21, 0xcc, 0xbb, 0xb2, 0xa7, 0xe8, 0x97, 0x94, 0xde, 0x5f, 0xf8, 0x11, + 0x8a, 0xf3, 0x33, 0x1a, 0x03, 0x5c, 0x43, 0x99, 0x03, 0x13, 0x2d, 0xd7, 0xb4, 0xc4, 0x8b, 0xb0, + 0xf6, 0x33, 0x20, 0xa3, 0x34, 0x8b, 0x7b, 0xe2, 0xfe, 0xb4, 0xe7, 0x3a, 0x54, 0x08, 0x2d, 0xd7, + 0x0c, 0xb7, 0xc0, 0xe3, 0xbf, 0x62, 0x6c, 0x55, 0xf0, 0x33, 0x28, 0x52, 0xf8, 0x48, 0x7d, 0xfd, +} + +func init() { + copy(testPublicKey[:], testPrivateKey[32:]) +} + +func TestSign(t *testing.T) { + signedMessage := Sign(nil, testMessage, &testPrivateKey) + if !bytes.Equal(signedMessage, testSignedMessage) { + t.Fatalf("signed message did not match, got\n%x\n, expected\n%x", signedMessage, testSignedMessage) + } +} + +func TestOpen(t *testing.T) { + message, ok := Open(nil, testSignedMessage, &testPublicKey) + if !ok { + t.Fatalf("valid signed message not successfully verified") + } + if !bytes.Equal(message, testMessage) { + t.Fatalf("message did not match, 
got\n%x\n, expected\n%x", message, testMessage) + } + message, ok = Open(nil, testSignedMessage[1:], &testPublicKey) + if ok { + t.Fatalf("invalid signed message successfully verified") + } + + badMessage := make([]byte, len(testSignedMessage)) + copy(badMessage, testSignedMessage) + badMessage[5] ^= 1 + if _, ok := Open(nil, badMessage, &testPublicKey); ok { + t.Fatalf("Open succeeded with a corrupt message") + } + + var badPublicKey [32]byte + copy(badPublicKey[:], testPublicKey[:]) + badPublicKey[5] ^= 1 + if _, ok := Open(nil, testSignedMessage, &badPublicKey); ok { + t.Fatalf("Open succeeded with a corrupt public key") + } +} + +func TestGenerateSignOpen(t *testing.T) { + publicKey, privateKey, _ := GenerateKey(rand.Reader) + signedMessage := Sign(nil, testMessage, privateKey) + message, ok := Open(nil, signedMessage, publicKey) + if !ok { + t.Fatalf("failed to verify signed message") + } + + if !bytes.Equal(message, testMessage) { + t.Fatalf("verified message does not match signed messge, got\n%x\n, expected\n%x", message, testMessage) + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go index 266840d05..02b372cf3 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go @@ -42,12 +42,18 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) { switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } case PubKeyAlgoElGamal: e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) if err != nil { return } e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) + if err != nil { + return + } } _, err = consumeAll(r) return @@ -72,7 +78,8 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { // padding oracle attacks. 
switch priv.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes) + k := priv.PrivateKey.(*rsa.PrivateKey) + b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes)) case PubKeyAlgoElGamal: c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go index fee14cf3c..f2fcf4d35 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go @@ -39,39 +39,44 @@ var encryptedKeyPriv = &PrivateKey{ } func TestDecryptingEncryptedKey(t *testing.T) { - const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8" - const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b" + for i, encryptedKeyHex := range []string{ + "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8", + // MPI can be shorter than the length of the key. 
+ "c18b032a67d68660df41c70103f8e520c52ae9807183c669ce26e772e482dc5d8cf60e6f59316e145be14d2e5221ee69550db1d5618a8cb002a719f1f0b9345bde21536d410ec90ba86cac37748dec7933eb7f9873873b2d61d3321d1cd44535014f6df58f7bc0c7afb5edc38e1a974428997d2f747f9a173bea9ca53079b409517d332df62d805564cffc9be6", + } { + const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b" - p, err := Read(readerFromHex(encryptedKeyHex)) - if err != nil { - t.Errorf("error from Read: %s", err) - return - } - ek, ok := p.(*EncryptedKey) - if !ok { - t.Errorf("didn't parse an EncryptedKey, got %#v", p) - return - } + p, err := Read(readerFromHex(encryptedKeyHex)) + if err != nil { + t.Errorf("#%d: error from Read: %s", i, err) + return + } + ek, ok := p.(*EncryptedKey) + if !ok { + t.Errorf("#%d: didn't parse an EncryptedKey, got %#v", i, p) + return + } - if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA { - t.Errorf("unexpected EncryptedKey contents: %#v", ek) - return - } + if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA { + t.Errorf("#%d: unexpected EncryptedKey contents: %#v", i, ek) + return + } - err = ek.Decrypt(encryptedKeyPriv, nil) - if err != nil { - t.Errorf("error from Decrypt: %s", err) - return - } + err = ek.Decrypt(encryptedKeyPriv, nil) + if err != nil { + t.Errorf("#%d: error from Decrypt: %s", i, err) + return + } - if ek.CipherFunc != CipherAES256 { - t.Errorf("unexpected EncryptedKey contents: %#v", ek) - return - } + if ek.CipherFunc != CipherAES256 { + t.Errorf("#%d: unexpected EncryptedKey contents: %#v", i, ek) + return + } - keyHex := fmt.Sprintf("%x", ek.Key) - if keyHex != expectedKeyHex { - t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex) + keyHex := fmt.Sprintf("%x", ek.Key) + if keyHex != expectedKeyHex { + t.Errorf("#%d: bad key, got %s want %s", i, keyHex, expectedKeyHex) + } } } @@ -121,7 +126,7 @@ func TestEncryptingEncryptedKey(t *testing.T) { keyHex := fmt.Sprintf("%x", ek.Key) if keyHex != 
expectedKeyHex { - t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex) + t.Errorf("bad key, got %s want %s", keyHex, expectedKeyHex) } } diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go index 3eded93f0..625bb5ac8 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -11,10 +11,12 @@ import ( "crypto/aes" "crypto/cipher" "crypto/des" - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" + "crypto/rsa" "io" "math/big" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" ) // readFull is the same as io.ReadFull except that reading zero bytes returns @@ -500,19 +502,17 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { numBytes := (int(bitLength) + 7) / 8 mpi = make([]byte, numBytes) _, err = readFull(r, mpi) - return -} - -// mpiLength returns the length of the given *big.Int when serialized as an -// MPI. -func mpiLength(n *big.Int) (mpiLengthInBytes int) { - mpiLengthInBytes = 2 /* MPI length */ - mpiLengthInBytes += (n.BitLen() + 7) / 8 + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. return } // writeMPI serializes a big integer to w. func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. + // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. 
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) if err == nil { _, err = w.Write(mpiBytes) @@ -525,6 +525,18 @@ func writeBig(w io.Writer, i *big.Int) error { return writeMPI(w, uint16(i.BitLen()), i.Bytes()) } +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + // CompressionAlgo Represents the different compression algorithms // supported by OpenPGP (except for BZIP2, which is not currently // supported). See Section 9.3 of RFC 4880. diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go index ead26233d..fcd5f5251 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -244,7 +244,12 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey } pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes)) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. 
See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) pk.setFingerPrintAndKeyId() return pk @@ -515,7 +520,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) if err != nil { return errors.SignatureError("RSA verification failure") } @@ -566,7 +571,7 @@ func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { return errors.SignatureError("RSA verification failure") } return diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go index 7ad7d9185..103696ee7 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_test.go @@ -6,7 +6,10 @@ package packet import ( "bytes" + "crypto/ecdsa" + "crypto/elliptic" "encoding/hex" + "math/big" "testing" "time" ) @@ -186,6 +189,29 @@ func TestEcc384Serialize(t *testing.T) { } } +func TestP256KeyID(t *testing.T) { + // Confirm that key IDs are correctly calculated for ECC keys. 
+ ecdsaPub := &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: fromHex("81fbbc20eea9e8d1c3ceabb0a8185925b113d1ac42cd5c78403bd83da19235c6"), + Y: fromHex("5ed6db13d91db34507d0129bf88981878d29adbf8fcd1720afdb767bb3fcaaff"), + } + pub := NewECDSAPublicKey(time.Unix(1297309478, 0), ecdsaPub) + + const want = uint64(0xd01055fbcadd268e) + if pub.KeyId != want { + t.Errorf("want key ID: %x, got %x", want, pub.KeyId) + } +} + +func fromHex(hex string) *big.Int { + n, ok := new(big.Int).SetString(hex, 16) + if !ok { + panic("bad hex number: " + hex) + } + return n +} + const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" diff --git a/vendor/golang.org/x/crypto/xtea/block.go b/vendor/golang.org/x/crypto/xtea/block.go index bf5d24599..fcb4e4d00 100644 --- a/vendor/golang.org/x/crypto/xtea/block.go +++ b/vendor/golang.org/x/crypto/xtea/block.go @@ -50,7 +50,7 @@ func encryptBlock(c *Cipher, dst, src []byte) { uint32ToBlock(v0, v1, dst) } -// decryptBlock decrypt a single 8 byte block using XTEA. +// decryptBlock decrypts a single 8 byte block using XTEA. func decryptBlock(c *Cipher, dst, src []byte) { v0, v1 := blockToUint32(src) diff --git a/vendor/golang.org/x/crypto/xtea/cipher.go b/vendor/golang.org/x/crypto/xtea/cipher.go index 66ea0df16..1661cbea8 100644 --- a/vendor/golang.org/x/crypto/xtea/cipher.go +++ b/vendor/golang.org/x/crypto/xtea/cipher.go @@ -14,8 +14,8 @@ import "strconv" const BlockSize = 8 // A Cipher is an instance of an XTEA cipher using a particular key. -// table contains a series of precalculated values that are used each round. type Cipher struct { + // table contains a series of precalculated values that are used each round. 
table [64]uint32 } @@ -54,7 +54,7 @@ func (c *Cipher) BlockSize() int { return BlockSize } // instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c, dst, src) } -// Decrypt decrypts the 8 byte buffer src using the key k and stores the result in dst. +// Decrypt decrypts the 8 byte buffer src using the key and stores the result in dst. func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c, dst, src) } // initCipher initializes the cipher context by creating a look up table diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md index 88dff59bc..d0485e887 100644 --- a/vendor/golang.org/x/net/CONTRIBUTING.md +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. @@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). 
- Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go index 624f9b69f..d8d3b0342 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -90,6 +90,8 @@ var ( errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)") errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)") + errStringTooLong = errors.New("character string exceeds maximum length (255)") + errCompressedSRV = errors.New("compressed name in SRV resource data") ) // Internal constants. @@ -218,6 +220,7 @@ func (h *header) count(sec section) uint16 { return 0 } +// pack appends the wire format of the header to msg. func (h *header) pack(msg []byte) []byte { msg = packUint16(msg, h.id) msg = packUint16(msg, h.bits) @@ -280,6 +283,7 @@ type ResourceBody interface { realType() Type } +// pack appends the wire format of the Resource to msg. func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { if r.Body == nil { return msg, errNilResouceBody @@ -1311,9 +1315,10 @@ type ResourceHeader struct { Length uint16 } -// pack packs all of the fields in a ResourceHeader except for the length. The -// length bytes are returned as a slice so they can be filled in after the rest -// of the Resource has been packed. +// pack appends the wire format of the ResourceHeader to oldMsg. +// +// The bytes where length was packed are returned as a slice so they can be +// updated after the rest of the Resource has been packed. 
func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, length []byte, err error) { msg = oldMsg if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil { @@ -1385,6 +1390,7 @@ func skipResource(msg []byte, off int) (int, error) { return newOff, nil } +// packUint16 appends the wire format of field to msg. func packUint16(msg []byte, field uint16) []byte { return append(msg, byte(field>>8), byte(field)) } @@ -1403,6 +1409,7 @@ func skipUint16(msg []byte, off int) (int, error) { return off + uint16Len, nil } +// packType appends the wire format of field to msg. func packType(msg []byte, field Type) []byte { return packUint16(msg, uint16(field)) } @@ -1416,6 +1423,7 @@ func skipType(msg []byte, off int) (int, error) { return skipUint16(msg, off) } +// packClass appends the wire format of field to msg. func packClass(msg []byte, field Class) []byte { return packUint16(msg, uint16(field)) } @@ -1429,6 +1437,7 @@ func skipClass(msg []byte, off int) (int, error) { return skipUint16(msg, off) } +// packUint32 appends the wire format of field to msg. func packUint32(msg []byte, field uint32) []byte { return append( msg, @@ -1454,17 +1463,16 @@ func skipUint32(msg []byte, off int) (int, error) { return off + uint32Len, nil } -func packText(msg []byte, field string) []byte { - for len(field) > 0 { - l := len(field) - if l > 255 { - l = 255 - } - msg = append(msg, byte(l)) - msg = append(msg, field[:l]...) - field = field[l:] +// packText appends the wire format of field to msg. +func packText(msg []byte, field string) ([]byte, error) { + l := len(field) + if l > 255 { + return nil, errStringTooLong } - return msg + msg = append(msg, byte(l)) + msg = append(msg, field...) + + return msg, nil } func unpackText(msg []byte, off int) (string, int, error) { @@ -1490,6 +1498,7 @@ func skipText(msg []byte, off int) (int, error) { return endOff, nil } +// packBytes appends the wire format of field to msg. 
func packBytes(msg []byte, field []byte) []byte { return append(msg, field...) } @@ -1534,7 +1543,7 @@ func (n Name) String() string { return string(n.Data[:n.Length]) } -// pack packs a domain name. +// pack appends the wire format of the Name to msg. // // Domain names are a sequence of counted strings split at the dots. They end // with a zero-length string. Compression can be used to reuse domain suffixes. @@ -1602,6 +1611,10 @@ func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) // unpack unpacks a domain name. func (n *Name) unpack(msg []byte, off int) (int, error) { + return n.unpackCompressed(msg, off, true /* allowCompression */) +} + +func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) { // currOff is the current working offset. currOff := off @@ -1637,6 +1650,9 @@ Loop: name = append(name, '.') currOff = endOff case 0xC0: // Pointer + if !allowCompression { + return off, errCompressedSRV + } if currOff >= len(msg) { return off, errInvalidPtr } @@ -1716,6 +1732,7 @@ type Question struct { Class Class } +// pack appends the wire format of the Question to msg. func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { msg, err := q.Name.pack(msg, compression, compressionOff) if err != nil { @@ -1796,6 +1813,7 @@ func (r *CNAMEResource) realType() Type { return TypeCNAME } +// pack appends the wire format of the CNAMEResource to msg. func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return r.CNAME.pack(msg, compression, compressionOff) } @@ -1818,6 +1836,7 @@ func (r *MXResource) realType() Type { return TypeMX } +// pack appends the wire format of the MXResource to msg. 
func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Pref) @@ -1849,6 +1868,7 @@ func (r *NSResource) realType() Type { return TypeNS } +// pack appends the wire format of the NSResource to msg. func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return r.NS.pack(msg, compression, compressionOff) } @@ -1870,6 +1890,7 @@ func (r *PTRResource) realType() Type { return TypePTR } +// pack appends the wire format of the PTRResource to msg. func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return r.PTR.pack(msg, compression, compressionOff) } @@ -1901,6 +1922,7 @@ func (r *SOAResource) realType() Type { return TypeSOA } +// pack appends the wire format of the SOAResource to msg. func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg, err := r.NS.pack(msg, compression, compressionOff) @@ -1953,19 +1975,28 @@ func unpackSOAResource(msg []byte, off int) (SOAResource, error) { // A TXTResource is a TXT Resource record. type TXTResource struct { - Txt string // Not a domain name. + TXT []string } func (r *TXTResource) realType() Type { return TypeTXT } +// pack appends the wire format of the TXTResource to msg. 
func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { - return packText(msg, r.Txt), nil + oldMsg := msg + for _, s := range r.TXT { + var err error + msg, err = packText(msg, s) + if err != nil { + return oldMsg, err + } + } + return msg, nil } func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) { - var txt string + txts := make([]string, 0, 1) for n := uint16(0); n < length; { var t string var err error @@ -1977,9 +2008,9 @@ func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) return TXTResource{}, errCalcLen } n += uint16(len(t)) + 1 - txt += t + txts = append(txts, t) } - return TXTResource{txt}, nil + return TXTResource{txts}, nil } // An SRVResource is an SRV Resource record. @@ -1994,6 +2025,7 @@ func (r *SRVResource) realType() Type { return TypeSRV } +// pack appends the wire format of the SRVResource to msg. func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Priority) @@ -2020,7 +2052,7 @@ func unpackSRVResource(msg []byte, off int) (SRVResource, error) { return SRVResource{}, &nestedError{"Port", err} } var target Name - if _, err := target.unpack(msg, off); err != nil { + if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil { return SRVResource{}, &nestedError{"Target", err} } return SRVResource{priority, weight, port, target}, nil @@ -2035,6 +2067,7 @@ func (r *AResource) realType() Type { return TypeA } +// pack appends the wire format of the AResource to msg. func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packBytes(msg, r.A[:]), nil } @@ -2056,6 +2089,7 @@ func (r *AAAAResource) realType() Type { return TypeAAAA } +// pack appends the wire format of the AAAAResource to msg. 
func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packBytes(msg, r.AAAA[:]), nil } diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go index d4eca26f1..052897f3c 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strings" "testing" ) @@ -157,6 +158,28 @@ func TestNamePackUnpack(t *testing.T) { } } +func TestIncompressibleName(t *testing.T) { + name := mustNewName("example.com.") + compression := map[string]int{} + buf, err := name.pack(make([]byte, 0, 100), compression, 0) + if err != nil { + t.Fatal("First packing failed:", err) + } + buf, err = name.pack(buf, compression, 0) + if err != nil { + t.Fatal("Second packing failed:", err) + } + var n1 Name + off, err := n1.unpackCompressed(buf, 0, false /* allowCompression */) + if err != nil { + t.Fatal("Unpacking incompressible name without pointers failed:", err) + } + var n2 Name + if _, err := n2.unpackCompressed(buf, off, false /* allowCompression */); err != errCompressedSRV { + t.Errorf("Unpacking compressed incompressible name with pointers: got err = %v, want = %v", err, errCompressedSRV) + } +} + func checkErrorPrefix(err error, prefix string) bool { e, ok := err.(*nestedError) return ok && e.s == prefix @@ -444,7 +467,15 @@ func TestVeryLongTxt(t *testing.T) { Type: TypeTXT, Class: ClassINET, }, - &TXTResource{loremIpsum}, + &TXTResource{[]string{ + "", + "", + "foo bar", + "", + "www.example.com", + "www.example.com.", + strings.Repeat(".", 255), + }}, } buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0) if err != nil { @@ -468,6 +499,13 @@ func TestVeryLongTxt(t *testing.T) { } } +func TestTooLongTxt(t *testing.T) { + rb := TXTResource{[]string{strings.Repeat(".", 256)}} + if _, err := rb.pack(make([]byte, 0, 8000), 
map[string]int{}, 0); err != errStringTooLong { + t.Errorf("Packing TXTRecord with 256 character string: got err = %v, want = %v", err, errStringTooLong) + } +} + func TestStartAppends(t *testing.T) { buf := make([]byte, 2, 514) wantBuf := []byte{4, 44} @@ -1084,7 +1122,7 @@ func largeTestMsg() Message { Type: TypeTXT, Class: ClassINET, }, - &TXTResource{"So Long, and Thanks for All the Fish"}, + &TXTResource{[]string{"So Long, and Thanks for All the Fish"}}, }, { ResourceHeader{ @@ -1092,139 +1130,8 @@ func largeTestMsg() Message { Type: TypeTXT, Class: ClassINET, }, - &TXTResource{"Hamster Huey and the Gooey Kablooie"}, + &TXTResource{[]string{"Hamster Huey and the Gooey Kablooie"}}, }, }, } } - -const loremIpsum = ` -Lorem ipsum dolor sit amet, nec enim antiopam id, an ullum choro -nonumes qui, pro eu debet honestatis mediocritatem. No alia enim eos, -magna signiferumque ex vis. Mei no aperiri dissentias, cu vel quas -regione. Malorum quaeque vim ut, eum cu semper aliquid invidunt, ei -nam ipsum assentior. - -Nostrum appellantur usu no, vis ex probatus adipiscing. Cu usu illum -facilis eleifend. Iusto conceptam complectitur vim id. Tale omnesque -no usu, ei oblique sadipscing vim. At nullam voluptua usu, mei laudem -reformidans et. Qui ei eros porro reformidans, ius suas veritus -torquatos ex. Mea te facer alterum consequat. - -Soleat torquatos democritum sed et, no mea congue appareat, facer -aliquam nec in. Has te ipsum tritani. At justo dicta option nec, movet -phaedrum ad nam. Ea detracto verterem liberavisse has, delectus -suscipiantur in mei. Ex nam meliore complectitur. Ut nam omnis -honestatis quaerendum, ea mea nihil affert detracto, ad vix rebum -mollis. - -Ut epicurei praesent neglegentur pri, prima fuisset intellegebat ad -vim. An habemus comprehensam usu, at enim dignissim pro. Eam reque -vivendum adipisci ea. Vel ne odio choro minimum. Sea admodum -dissentiet ex. Mundi tamquam evertitur ius cu. 
Homero postea iisque ut -pro, vel ne saepe senserit consetetur. - -Nulla utamur facilisis ius ea, in viderer diceret pertinax eum. Mei no -enim quodsi facilisi, ex sed aeterno appareat mediocritatem, eum -sententiae deterruisset ut. At suas timeam euismod cum, offendit -appareat interpretaris ne vix. Vel ea civibus albucius, ex vim quidam -accusata intellegebat, noluisse instructior sea id. Nec te nonumes -habemus appellantur, quis dignissim vituperata eu nam. - -At vix apeirian patrioque vituperatoribus, an usu agam assum. Debet -iisque an mea. Per eu dicant ponderum accommodare. Pri alienum -placerat senserit an, ne eum ferri abhorreant vituperatoribus. Ut mea -eligendi disputationi. Ius no tation everti impedit, ei magna quidam -mediocritatem pri. - -Legendos perpetua iracundia ne usu, no ius ullum epicurei intellegam, -ad modus epicuri lucilius eam. In unum quaerendum usu. Ne diam paulo -has, ea veri virtute sed. Alia honestatis conclusionemque mea eu, ut -iudico albucius his. - -Usu essent probatus eu, sed omnis dolor delicatissimi ex. No qui augue -dissentias dissentiet. Laudem recteque no usu, vel an velit noluisse, -an sed utinam eirmod appetere. Ne mea fuisset inimicus ocurreret. At -vis dicant abhorreant, utinam forensibus nec ne, mei te docendi -consequat. Brute inermis persecuti cum id. Ut ipsum munere propriae -usu, dicit graeco disputando id has. - -Eros dolore quaerendum nam ei. Timeam ornatus inciderint pro id. Nec -torquatos sadipscing ei, ancillae molestie per in. Malis principes duo -ea, usu liber postulant ei. - -Graece timeam voluptatibus eu eam. Alia probatus quo no, ea scripta -feugiat duo. Congue option meliore ex qui, noster invenire appellantur -ea vel. Eu exerci legendos vel. Consetetur repudiandae vim ut. Vix an -probo minimum, et nam illud falli tempor. - -Cum dico signiferumque eu. Sed ut regione maiorum, id veritus insolens -tacimates vix. Eu mel sint tamquam lucilius, duo no oporteat -tacimates. 
Atqui augue concludaturque vix ei, id mel utroque menandri. - -Ad oratio blandit aliquando pro. Vis et dolorum rationibus -philosophia, ad cum nulla molestie. Hinc fuisset adversarium eum et, -ne qui nisl verear saperet, vel te quaestio forensibus. Per odio -option delenit an. Alii placerat has no, in pri nihil platonem -cotidieque. Est ut elit copiosae scaevola, debet tollit maluisset sea -an. - -Te sea hinc debet pericula, liber ridens fabulas cu sed, quem mutat -accusam mea et. Elitr labitur albucius et pri, an labore feugait mel. -Velit zril melius usu ea. Ad stet putent interpretaris qui. Mel no -error volumus scripserit. In pro paulo iudico, quo ei dolorem -verterem, affert fabellas dissentiet ea vix. - -Vis quot deserunt te. Error aliquid detraxit eu usu, vis alia eruditi -salutatus cu. Est nostrud bonorum an, ei usu alii salutatus. Vel at -nisl primis, eum ex aperiri noluisse reformidans. Ad veri velit -utroque vis, ex equidem detraxit temporibus has. - -Inermis appareat usu ne. Eros placerat periculis mea ad, in dictas -pericula pro. Errem postulant at usu, ea nec amet ornatus mentitum. Ad -mazim graeco eum, vel ex percipit volutpat iudicabit, sit ne delicata -interesset. Mel sapientem prodesset abhorreant et, oblique suscipit -eam id. - -An maluisset disputando mea, vidit mnesarchum pri et. Malis insolens -inciderint no sea. Ea persius maluisset vix, ne vim appellantur -instructior, consul quidam definiebas pri id. Cum integre feugiat -pericula in, ex sed persius similique, mel ne natum dicit percipitur. - -Primis discere ne pri, errem putent definitionem at vis. Ei mel dolore -neglegentur, mei tincidunt percipitur ei. Pro ad simul integre -rationibus. Eu vel alii honestatis definitiones, mea no nonumy -reprehendunt. - -Dicta appareat legendos est cu. Eu vel congue dicunt omittam, no vix -adhuc minimum constituam, quot noluisse id mel. Eu quot sale mutat -duo, ex nisl munere invenire duo. Ne nec ullum utamur. 
Pro alterum -debitis nostrum no, ut vel aliquid vivendo. - -Aliquip fierent praesent quo ne, id sit audiam recusabo delicatissimi. -Usu postulant incorrupte cu. At pro dicit tibique intellegam, cibo -dolore impedit id eam, et aeque feugait assentior has. Quando sensibus -nec ex. Possit sensibus pri ad, unum mutat periculis cu vix. - -Mundi tibique vix te, duo simul partiendo qualisque id, est at vidit -sonet tempor. No per solet aeterno deseruisse. Petentium salutandi -definiebas pri cu. Munere vivendum est in. Ei justo congue eligendi -vis, modus offendit omittantur te mel. - -Integre voluptaria in qui, sit habemus tractatos constituam no. Utinam -melius conceptam est ne, quo in minimum apeirian delicata, ut ius -porro recusabo. Dicant expetenda vix no, ludus scripserit sed ex, eu -his modo nostro. Ut etiam sonet his, quodsi inciderint philosophia te -per. Nullam lobortis eu cum, vix an sonet efficiendi repudiandae. Vis -ad idque fabellas intellegebat. - -Eum commodo senserit conclusionemque ex. Sed forensibus sadipscing ut, -mei in facer delicata periculis, sea ne hinc putent cetero. Nec ne -alia corpora invenire, alia prima soleat te cum. Eleifend posidonium -nam at. - -Dolorum indoctum cu quo, ex dolor legendos recteque eam, cu pri zril -discere. Nec civibus officiis dissentiunt ex, est te liber ludus -elaboraret. Cum ea fabellas invenire. Ex vim nostrud eripuit -comprehensam, nam te inermis delectus, saepe inermis senserit. -` diff --git a/vendor/golang.org/x/net/icmp/diag_test.go b/vendor/golang.org/x/net/icmp/diag_test.go new file mode 100644 index 000000000..2ecd465a1 --- /dev/null +++ b/vendor/golang.org/x/net/icmp/diag_test.go @@ -0,0 +1,274 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package icmp_test + +import ( + "errors" + "fmt" + "net" + "os" + "runtime" + "sync" + "testing" + "time" + + "golang.org/x/net/icmp" + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/nettest" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +type diagTest struct { + network, address string + protocol int + m icmp.Message +} + +func TestDiag(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + + t.Run("Ping/NonPrivileged", func(t *testing.T) { + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Logf("not supported on %s", runtime.GOOS) + return + } + for i, dt := range []diagTest{ + { + "udp4", "0.0.0.0", iana.ProtocolICMP, + icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + + { + "udp6", "::", iana.ProtocolIPv6ICMP, + icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + } { + if err := doDiag(dt, i); err != nil { + t.Error(err) + } + } + }) + t.Run("Ping/Privileged", func(t *testing.T) { + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + for i, dt := range []diagTest{ + { + "ip4:icmp", "0.0.0.0", iana.ProtocolICMP, + icmp.Message{ + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + + { + "ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, + icmp.Message{ + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + }, + } { + if err := doDiag(dt, i); err != nil { + t.Error(err) + } + } + }) + t.Run("Probe/Privileged", func(t *testing.T) { + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + for i, dt := range []diagTest{ + { + 
"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, + icmp.Message{ + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: os.Getpid() & 0xffff, + Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, Type: 1, + Name: "doesnotexist", + }, + }, + }, + }, + }, + + { + "ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, + icmp.Message{ + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: os.Getpid() & 0xffff, + Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, Type: 1, + Name: "doesnotexist", + }, + }, + }, + }, + }, + } { + if err := doDiag(dt, i); err != nil { + t.Error(err) + } + } + }) +} + +func doDiag(dt diagTest, seq int) error { + c, err := icmp.ListenPacket(dt.network, dt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, dt.protocol) + if err != nil { + return err + } + + if dt.network != "udp6" && dt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + f.Accept(ipv6.ICMPTypeExtendedEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + switch m := dt.m.Body.(type) { + case *icmp.Echo: + m.Seq = 1 << uint(seq) + case *icmp.ExtendedEchoRequest: + m.Seq = 1 << uint(seq) + } + wb, err := dt.m.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(dt.protocol, rb[:n]) + if err != nil { + 
return err + } + switch { + case dt.m.Type == ipv4.ICMPTypeEcho && rm.Type == ipv4.ICMPTypeEchoReply: + fallthrough + case dt.m.Type == ipv6.ICMPTypeEchoRequest && rm.Type == ipv6.ICMPTypeEchoReply: + fallthrough + case dt.m.Type == ipv4.ICMPTypeExtendedEchoRequest && rm.Type == ipv4.ICMPTypeExtendedEchoReply: + fallthrough + case dt.m.Type == ipv6.ICMPTypeExtendedEchoRequest && rm.Type == ipv6.ICMPTypeExtendedEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want echo reply or extended echo reply", rm, peer) + } +} + +func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { + host := "ipv4.google.com" + if protocol == iana.ProtocolIPv6ICMP { + host = "ipv6.google.com" + } + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + netaddr := func(ip net.IP) (net.Addr, error) { + switch c.LocalAddr().(type) { + case *net.UDPAddr: + return &net.UDPAddr{IP: ip}, nil + case *net.IPAddr: + return &net.IPAddr{IP: ip}, nil + default: + return nil, errors.New("neither UDPAddr nor IPAddr") + } + } + if len(ips) > 0 { + return netaddr(ips[0]) + } + return nil, errors.New("no A or AAAA record") +} + +func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { + if testing.Short() { + t.Skip("avoid external network") + } + switch runtime.GOOS { + case "darwin": + case "linux": + t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + t.Skipf("not supported on %s", runtime.GOOS) + } + + network, address := "udp4", "127.0.0.1" + if !nettest.SupportsIPv4() { + network, address = "udp6", "::1" + } + const N = 1000 + var wg sync.WaitGroup + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + c, err := icmp.ListenPacket(network, address) + if err != nil { + t.Error(err) + return + } + c.Close() + }() + } + wg.Wait() +} diff --git a/vendor/golang.org/x/net/icmp/dstunreach.go b/vendor/golang.org/x/net/icmp/dstunreach.go index 75db991df..7464bf7ea 100644 --- 
a/vendor/golang.org/x/net/icmp/dstunreach.go +++ b/vendor/golang.org/x/net/icmp/dstunreach.go @@ -16,24 +16,24 @@ func (p *DstUnreach) Len(proto int) int { if p == nil { return 0 } - l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) return 4 + l } // Marshal implements the Marshal method of MessageBody interface. func (p *DstUnreach) Marshal(proto int) ([]byte, error) { - return marshalMultipartMessageBody(proto, p.Data, p.Extensions) + return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) } // parseDstUnreach parses b as an ICMP destination unreachable message // body. -func parseDstUnreach(proto int, b []byte) (MessageBody, error) { +func parseDstUnreach(proto int, typ Type, b []byte) (MessageBody, error) { if len(b) < 4 { return nil, errMessageTooShort } p := &DstUnreach{} var err error - p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/icmp/echo.go b/vendor/golang.org/x/net/icmp/echo.go index e6f15efd7..c611f6516 100644 --- a/vendor/golang.org/x/net/icmp/echo.go +++ b/vendor/golang.org/x/net/icmp/echo.go @@ -31,7 +31,7 @@ func (p *Echo) Marshal(proto int) ([]byte, error) { } // parseEcho parses b as an ICMP echo request or reply message body. -func parseEcho(proto int, b []byte) (MessageBody, error) { +func parseEcho(proto int, _ Type, b []byte) (MessageBody, error) { bodyLen := len(b) if bodyLen < 4 { return nil, errMessageTooShort @@ -43,3 +43,115 @@ func parseEcho(proto int, b []byte) (MessageBody, error) { } return p, nil } + +// An ExtendedEchoRequest represents an ICMP extended echo request +// message body. 
+type ExtendedEchoRequest struct { + ID int // identifier + Seq int // sequence number + Local bool // must be true when identifying by name or index + Extensions []Extension // extensions +} + +// Len implements the Len method of MessageBody interface. +func (p *ExtendedEchoRequest) Len(proto int) int { + if p == nil { + return 0 + } + l, _ := multipartMessageBodyDataLen(proto, false, nil, p.Extensions) + return 4 + l +} + +// Marshal implements the Marshal method of MessageBody interface. +func (p *ExtendedEchoRequest) Marshal(proto int) ([]byte, error) { + b, err := marshalMultipartMessageBody(proto, false, nil, p.Extensions) + if err != nil { + return nil, err + } + bb := make([]byte, 4) + binary.BigEndian.PutUint16(bb[:2], uint16(p.ID)) + bb[2] = byte(p.Seq) + if p.Local { + bb[3] |= 0x01 + } + bb = append(bb, b...) + return bb, nil +} + +// parseExtendedEchoRequest parses b as an ICMP extended echo request +// message body. +func parseExtendedEchoRequest(proto int, typ Type, b []byte) (MessageBody, error) { + if len(b) < 4+4 { + return nil, errMessageTooShort + } + p := &ExtendedEchoRequest{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(b[2])} + if b[3]&0x01 != 0 { + p.Local = true + } + var err error + _, p.Extensions, err = parseMultipartMessageBody(proto, typ, b[4:]) + if err != nil { + return nil, err + } + return p, nil +} + +// An ExtendedEchoReply represents an ICMP extended echo reply message +// body. +type ExtendedEchoReply struct { + ID int // identifier + Seq int // sequence number + State int // 3-bit state working together with Message.Code + Active bool // probed interface is active + IPv4 bool // probed interface runs IPv4 + IPv6 bool // probed interface runs IPv6 +} + +// Len implements the Len method of MessageBody interface. +func (p *ExtendedEchoReply) Len(proto int) int { + if p == nil { + return 0 + } + return 4 +} + +// Marshal implements the Marshal method of MessageBody interface. 
+func (p *ExtendedEchoReply) Marshal(proto int) ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[:2], uint16(p.ID)) + b[2] = byte(p.Seq) + b[3] = byte(p.State<<5) & 0xe0 + if p.Active { + b[3] |= 0x04 + } + if p.IPv4 { + b[3] |= 0x02 + } + if p.IPv6 { + b[3] |= 0x01 + } + return b, nil +} + +// parseExtendedEchoReply parses b as an ICMP extended echo reply +// message body. +func parseExtendedEchoReply(proto int, _ Type, b []byte) (MessageBody, error) { + if len(b) < 4 { + return nil, errMessageTooShort + } + p := &ExtendedEchoReply{ + ID: int(binary.BigEndian.Uint16(b[:2])), + Seq: int(b[2]), + State: int(b[3]) >> 5, + } + if b[3]&0x04 != 0 { + p.Active = true + } + if b[3]&0x02 != 0 { + p.IPv4 = true + } + if b[3]&0x01 != 0 { + p.IPv6 = true + } + return p, nil +} diff --git a/vendor/golang.org/x/net/icmp/extension.go b/vendor/golang.org/x/net/icmp/extension.go index 402a7514b..200506855 100644 --- a/vendor/golang.org/x/net/icmp/extension.go +++ b/vendor/golang.org/x/net/icmp/extension.go @@ -4,7 +4,12 @@ package icmp -import "encoding/binary" +import ( + "encoding/binary" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) // An Extension represents an ICMP extension. type Extension interface { @@ -38,7 +43,7 @@ func validExtensionHeader(b []byte) bool { // It will return a list of ICMP extensions and an adjusted length // attribute that represents the length of the padded original // datagram field. Otherwise, it returns an error. -func parseExtensions(b []byte, l int) ([]Extension, int, error) { +func parseExtensions(typ Type, b []byte, l int) ([]Extension, int, error) { // Still a lot of non-RFC 4884 compliant implementations are // out there. Set the length attribute l to 128 when it looks // inappropriate for backwards compatibility. @@ -48,20 +53,28 @@ func parseExtensions(b []byte, l int) ([]Extension, int, error) { // header. // // See RFC 4884 for further information. 
- if 128 > l || l+8 > len(b) { - l = 128 - } - if l+8 > len(b) { - return nil, -1, errNoExtension - } - if !validExtensionHeader(b[l:]) { - if l == 128 { + switch typ { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + if len(b) < 8 || !validExtensionHeader(b) { return nil, -1, errNoExtension } - l = 128 - if !validExtensionHeader(b[l:]) { + l = 0 + default: + if 128 > l || l+8 > len(b) { + l = 128 + } + if l+8 > len(b) { return nil, -1, errNoExtension } + if !validExtensionHeader(b[l:]) { + if l == 128 { + return nil, -1, errNoExtension + } + l = 128 + if !validExtensionHeader(b[l:]) { + return nil, -1, errNoExtension + } + } } var exts []Extension for b = b[l+4:]; len(b) >= 4; { @@ -82,6 +95,12 @@ func parseExtensions(b []byte, l int) ([]Extension, int, error) { return nil, -1, err } exts = append(exts, ext) + case classInterfaceIdent: + ext, err := parseInterfaceIdent(b[:ol]) + if err != nil { + return nil, -1, err + } + exts = append(exts, ext) } b = b[ol:] } diff --git a/vendor/golang.org/x/net/icmp/extension_test.go b/vendor/golang.org/x/net/icmp/extension_test.go index 0b3f7b9e1..a7669dae0 100644 --- a/vendor/golang.org/x/net/icmp/extension_test.go +++ b/vendor/golang.org/x/net/icmp/extension_test.go @@ -5,253 +5,327 @@ package icmp import ( + "fmt" "net" "reflect" "testing" "golang.org/x/net/internal/iana" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" ) -var marshalAndParseExtensionTests = []struct { - proto int - hdr []byte - obj []byte - exts []Extension -}{ - // MPLS label stack with no label - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x04, 0x01, 0x01, - }, - exts: []Extension{ - &MPLSLabelStack{ - Class: classMPLSLabelStack, - Type: typeIncomingMPLSLabelStack, - }, - }, - }, - // MPLS label stack with a single label - { - proto: iana.ProtocolIPv6ICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x08, 0x01, 0x01, - 0x03, 0xe8, 0xe9, 0xff, - 
}, - exts: []Extension{ - &MPLSLabelStack{ - Class: classMPLSLabelStack, - Type: typeIncomingMPLSLabelStack, - Labels: []MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - }, - }, - // MPLS label stack with multiple labels - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x0c, 0x01, 0x01, - 0x03, 0xe8, 0xde, 0xfe, - 0x03, 0xe8, 0xe1, 0xff, - }, - exts: []Extension{ - &MPLSLabelStack{ - Class: classMPLSLabelStack, - Type: typeIncomingMPLSLabelStack, - Labels: []MPLSLabel{ - { - Label: 16013, - TC: 0x7, - S: false, - TTL: 254, - }, - { - Label: 16014, - TC: 0, - S: true, - TTL: 255, - }, - }, - }, - }, - }, - // Interface information with no attribute - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x04, 0x02, 0x00, - }, - exts: []Extension{ - &InterfaceInfo{ - Class: classInterfaceInfo, - }, - }, - }, - // Interface information with ifIndex and name - { - proto: iana.ProtocolICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x10, 0x02, 0x0a, - 0x00, 0x00, 0x00, 0x10, - 0x08, byte('e'), byte('n'), byte('1'), - byte('0'), byte('1'), 0x00, 0x00, - }, - exts: []Extension{ - &InterfaceInfo{ - Class: classInterfaceInfo, - Type: 0x0a, - Interface: &net.Interface{ - Index: 16, - Name: "en101", - }, - }, - }, - }, - // Interface information with ifIndex, IPAddr, name and MTU - { - proto: iana.ProtocolIPv6ICMP, - hdr: []byte{ - 0x20, 0x00, 0x00, 0x00, - }, - obj: []byte{ - 0x00, 0x28, 0x02, 0x0f, - 0x00, 0x00, 0x00, 0x0f, - 0x00, 0x02, 0x00, 0x00, - 0xfe, 0x80, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, - 0x08, byte('e'), byte('n'), byte('1'), - byte('0'), byte('1'), 0x00, 0x00, - 0x00, 0x00, 0x20, 0x00, - }, - exts: []Extension{ - &InterfaceInfo{ - Class: classInterfaceInfo, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - 
Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en101", - }, - }, - }, - }, -} - func TestMarshalAndParseExtension(t *testing.T) { - for i, tt := range marshalAndParseExtensionTests { - for j, ext := range tt.exts { - var err error - var b []byte - switch ext := ext.(type) { - case *MPLSLabelStack: - b, err = ext.Marshal(tt.proto) - if err != nil { - t.Errorf("#%v/%v: %v", i, j, err) + fn := func(t *testing.T, proto int, typ Type, hdr, obj []byte, te Extension) error { + b, err := te.Marshal(proto) + if err != nil { + return err + } + if !reflect.DeepEqual(b, obj) { + return fmt.Errorf("got %#v; want %#v", b, obj) + } + switch typ { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + exts, l, err := parseExtensions(typ, append(hdr, obj...), 0) + if err != nil { + return err + } + if l != 0 { + return fmt.Errorf("got %d; want 0", l) + } + if !reflect.DeepEqual(exts, []Extension{te}) { + return fmt.Errorf("got %#v; want %#v", exts[0], te) + } + default: + for i, wire := range []struct { + data []byte // original datagram + inlattr int // length of padded original datagram, a hint + outlattr int // length of padded original datagram, a want + err error + }{ + {nil, 0, -1, errNoExtension}, + {make([]byte, 127), 128, -1, errNoExtension}, + + {make([]byte, 128), 127, -1, errNoExtension}, + {make([]byte, 128), 128, -1, errNoExtension}, + {make([]byte, 128), 129, -1, errNoExtension}, + + {append(make([]byte, 128), append(hdr, obj...)...), 127, 128, nil}, + {append(make([]byte, 128), append(hdr, obj...)...), 128, 128, nil}, + {append(make([]byte, 128), append(hdr, obj...)...), 129, 128, nil}, + + {append(make([]byte, 512), append(hdr, obj...)...), 511, -1, errNoExtension}, + {append(make([]byte, 512), append(hdr, obj...)...), 512, 512, nil}, + {append(make([]byte, 512), append(hdr, obj...)...), 513, -1, errNoExtension}, + } { + exts, l, err := parseExtensions(typ, wire.data, wire.inlattr) + if err != wire.err { + return fmt.Errorf("#%d: 
got %v; want %v", i, err, wire.err) + } + if wire.err != nil { continue } - case *InterfaceInfo: - b, err = ext.Marshal(tt.proto) - if err != nil { - t.Errorf("#%v/%v: %v", i, j, err) - continue + if l != wire.outlattr { + return fmt.Errorf("#%d: got %d; want %d", i, l, wire.outlattr) + } + if !reflect.DeepEqual(exts, []Extension{te}) { + return fmt.Errorf("#%d: got %#v; want %#v", i, exts[0], te) } } - if !reflect.DeepEqual(b, tt.obj) { - t.Errorf("#%v/%v: got %#v; want %#v", i, j, b, tt.obj) - continue - } - } - - for j, wire := range []struct { - data []byte // original datagram - inlattr int // length of padded original datagram, a hint - outlattr int // length of padded original datagram, a want - err error - }{ - {nil, 0, -1, errNoExtension}, - {make([]byte, 127), 128, -1, errNoExtension}, - - {make([]byte, 128), 127, -1, errNoExtension}, - {make([]byte, 128), 128, -1, errNoExtension}, - {make([]byte, 128), 129, -1, errNoExtension}, - - {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 127, 128, nil}, - {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 128, 128, nil}, - {append(make([]byte, 128), append(tt.hdr, tt.obj...)...), 129, 128, nil}, - - {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 511, -1, errNoExtension}, - {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 512, 512, nil}, - {append(make([]byte, 512), append(tt.hdr, tt.obj...)...), 513, -1, errNoExtension}, - } { - exts, l, err := parseExtensions(wire.data, wire.inlattr) - if err != wire.err { - t.Errorf("#%v/%v: got %v; want %v", i, j, err, wire.err) - continue - } - if wire.err != nil { - continue - } - if l != wire.outlattr { - t.Errorf("#%v/%v: got %v; want %v", i, j, l, wire.outlattr) - } - if !reflect.DeepEqual(exts, tt.exts) { - for j, ext := range exts { - switch ext := ext.(type) { - case *MPLSLabelStack: - want := tt.exts[j].(*MPLSLabelStack) - t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) - case *InterfaceInfo: - want := 
tt.exts[j].(*InterfaceInfo) - t.Errorf("#%v/%v: got %#v; want %#v", i, j, ext, want) - } - } - continue - } } + return nil } -} -var parseInterfaceNameTests = []struct { - b []byte - error -}{ - {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, - {[]byte{4, 'e', 'n', '0'}, nil}, - {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, - {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, + t.Run("MPLSLabelStack", func(t *testing.T) { + for _, et := range []struct { + proto int + typ Type + hdr []byte + obj []byte + ext Extension + }{ + // MPLS label stack with no label + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x01, 0x01, + }, + ext: &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + }, + }, + // MPLS label stack with a single label + { + proto: iana.ProtocolIPv6ICMP, + typ: ipv6.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x08, 0x01, 0x01, + 0x03, 0xe8, 0xe9, 0xff, + }, + ext: &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + }, + // MPLS label stack with multiple labels + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x01, 0x01, + 0x03, 0xe8, 0xde, 0xfe, + 0x03, 0xe8, 0xe1, 0xff, + }, + ext: &MPLSLabelStack{ + Class: classMPLSLabelStack, + Type: typeIncomingMPLSLabelStack, + Labels: []MPLSLabel{ + { + Label: 16013, + TC: 0x7, + S: false, + TTL: 254, + }, + { + Label: 16014, + TC: 0, + S: true, + TTL: 255, + }, + }, + }, + }, + } { + if err := fn(t, et.proto, et.typ, et.hdr, et.obj, et.ext); err != nil { + t.Error(err) + } + } + }) + t.Run("InterfaceInfo", func(t *testing.T) { + for _, et := range []struct { + 
proto int + typ Type + hdr []byte + obj []byte + ext Extension + }{ + // Interface information with no attribute + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x04, 0x02, 0x00, + }, + ext: &InterfaceInfo{ + Class: classInterfaceInfo, + }, + }, + // Interface information with ifIndex and name + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x02, 0x0a, + 0x00, 0x00, 0x00, 0x10, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + }, + ext: &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0a, + Interface: &net.Interface{ + Index: 16, + Name: "en101", + }, + }, + }, + // Interface information with ifIndex, IPAddr, name and MTU + { + proto: iana.ProtocolIPv6ICMP, + typ: ipv6.ICMPTypeDestinationUnreachable, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x28, 0x02, 0x0f, + 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x02, 0x00, 0x00, + 0xfe, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x08, byte('e'), byte('n'), byte('1'), + byte('0'), byte('1'), 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, + }, + ext: &InterfaceInfo{ + Class: classInterfaceInfo, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + } { + if err := fn(t, et.proto, et.typ, et.hdr, et.obj, et.ext); err != nil { + t.Error(err) + } + } + }) + t.Run("InterfaceIdent", func(t *testing.T) { + for _, et := range []struct { + proto int + typ Type + hdr []byte + obj []byte + ext Extension + }{ + // Interface identification by name + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeExtendedEchoRequest, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x03, 0x01, + byte('e'), 
byte('n'), byte('1'), byte('0'), + byte('1'), 0x00, 0x00, 0x00, + }, + ext: &InterfaceIdent{ + Class: classInterfaceIdent, + Type: typeInterfaceByName, + Name: "en101", + }, + }, + // Interface identification by index + { + proto: iana.ProtocolIPv6ICMP, + typ: ipv6.ICMPTypeExtendedEchoRequest, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x0c, 0x03, 0x02, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x8f, + }, + ext: &InterfaceIdent{ + Class: classInterfaceIdent, + Type: typeInterfaceByIndex, + Index: 911, + }, + }, + // Interface identification by address + { + proto: iana.ProtocolICMP, + typ: ipv4.ICMPTypeExtendedEchoRequest, + hdr: []byte{ + 0x20, 0x00, 0x00, 0x00, + }, + obj: []byte{ + 0x00, 0x10, 0x03, 0x03, + byte(iana.AddrFamily48bitMAC >> 8), byte(iana.AddrFamily48bitMAC & 0x0f), 0x06, 0x00, + 0x01, 0x23, 0x45, 0x67, + 0x89, 0xab, 0x00, 0x00, + }, + ext: &InterfaceIdent{ + Class: classInterfaceIdent, + Type: typeInterfaceByAddress, + AFI: iana.AddrFamily48bitMAC, + Addr: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}, + }, + }, + } { + if err := fn(t, et.proto, et.typ, et.hdr, et.obj, et.ext); err != nil { + t.Error(err) + } + } + }) } func TestParseInterfaceName(t *testing.T) { ifi := InterfaceInfo{Interface: &net.Interface{}} - for i, tt := range parseInterfaceNameTests { + for i, tt := range []struct { + b []byte + error + }{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, + } { if _, err := ifi.parseName(tt.b); err != tt.error { t.Errorf("#%d: got %v; want %v", i, err, tt.error) } diff --git a/vendor/golang.org/x/net/icmp/interface.go b/vendor/golang.org/x/net/icmp/interface.go index 78b5b98bf..617f757b9 100644 --- a/vendor/golang.org/x/net/icmp/interface.go +++ b/vendor/golang.org/x/net/icmp/interface.go @@ -14,9 +14,6 @@ import ( const ( 
classInterfaceInfo = 2 - - afiIPv4 = 1 - afiIPv6 = 2 ) const ( @@ -127,11 +124,11 @@ func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) { func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte { switch proto { case iana.ProtocolICMP: - binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4)) + binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv4)) copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4()) b = b[4+net.IPv4len:] case iana.ProtocolIPv6ICMP: - binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6)) + binary.BigEndian.PutUint16(b[:2], uint16(iana.AddrFamilyIPv6)) copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16()) b = b[4+net.IPv6len:] } @@ -145,14 +142,14 @@ func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) { afi := int(binary.BigEndian.Uint16(b[:2])) b = b[4:] switch afi { - case afiIPv4: + case iana.AddrFamilyIPv4: if len(b) < net.IPv4len { return nil, errMessageTooShort } ifi.Addr.IP = make(net.IP, net.IPv4len) copy(ifi.Addr.IP, b[:net.IPv4len]) b = b[net.IPv4len:] - case afiIPv6: + case iana.AddrFamilyIPv6: if len(b) < net.IPv6len { return nil, errMessageTooShort } @@ -234,3 +231,92 @@ func parseInterfaceInfo(b []byte) (Extension, error) { } return ifi, nil } + +const ( + classInterfaceIdent = 3 + typeInterfaceByName = 1 + typeInterfaceByIndex = 2 + typeInterfaceByAddress = 3 +) + +// An InterfaceIdent represents interface identification. +type InterfaceIdent struct { + Class int // extension object class number + Type int // extension object sub-type + Name string // interface name + Index int // interface index + AFI int // address family identifier; see address family numbers in IANA registry + Addr []byte // address +} + +// Len implements the Len method of Extension interface. 
+func (ifi *InterfaceIdent) Len(_ int) int { + switch ifi.Type { + case typeInterfaceByName: + l := len(ifi.Name) + if l > 255 { + l = 255 + } + return 4 + (l+3)&^3 + case typeInterfaceByIndex: + return 4 + 8 + case typeInterfaceByAddress: + return 4 + 4 + (len(ifi.Addr)+3)&^3 + default: + return 4 + } +} + +// Marshal implements the Marshal method of Extension interface. +func (ifi *InterfaceIdent) Marshal(proto int) ([]byte, error) { + b := make([]byte, ifi.Len(proto)) + if err := ifi.marshal(proto, b); err != nil { + return nil, err + } + return b, nil +} + +func (ifi *InterfaceIdent) marshal(proto int, b []byte) error { + l := ifi.Len(proto) + binary.BigEndian.PutUint16(b[:2], uint16(l)) + b[2], b[3] = classInterfaceIdent, byte(ifi.Type) + switch ifi.Type { + case typeInterfaceByName: + copy(b[4:], ifi.Name) + case typeInterfaceByIndex: + binary.BigEndian.PutUint64(b[4:4+8], uint64(ifi.Index)) + case typeInterfaceByAddress: + binary.BigEndian.PutUint16(b[4:4+2], uint16(ifi.AFI)) + b[4+2] = byte(len(ifi.Addr)) + copy(b[4+4:], ifi.Addr) + } + return nil +} + +func parseInterfaceIdent(b []byte) (Extension, error) { + ifi := &InterfaceIdent{ + Class: int(b[2]), + Type: int(b[3]), + } + switch ifi.Type { + case typeInterfaceByName: + ifi.Name = strings.Trim(string(b[4:]), string(0)) + case typeInterfaceByIndex: + if len(b[4:]) < 8 { + return nil, errInvalidExtension + } + ifi.Index = int(binary.BigEndian.Uint64(b[4 : 4+8])) + case typeInterfaceByAddress: + if len(b[4:]) < 4 { + return nil, errInvalidExtension + } + ifi.AFI = int(binary.BigEndian.Uint16(b[4 : 4+2])) + l := int(b[4+2]) + if len(b[4+4:]) < l { + return nil, errInvalidExtension + } + ifi.Addr = make([]byte, l) + copy(ifi.Addr, b[4+4:]) + } + return ifi, nil +} diff --git a/vendor/golang.org/x/net/icmp/ipv4_test.go b/vendor/golang.org/x/net/icmp/ipv4_test.go index 058953f43..3fdee83fb 100644 --- a/vendor/golang.org/x/net/icmp/ipv4_test.go +++ b/vendor/golang.org/x/net/icmp/ipv4_test.go @@ -15,69 +15,61 
@@ import ( "golang.org/x/net/ipv4" ) -type ipv4HeaderTest struct { - wireHeaderFromKernel [ipv4.HeaderLen]byte - wireHeaderFromTradBSDKernel [ipv4.HeaderLen]byte - Header *ipv4.Header -} - -var ipv4HeaderLittleEndianTest = ipv4HeaderTest{ - // TODO(mikio): Add platform dependent wire header formats when - // we support new platforms. - wireHeaderFromKernel: [ipv4.HeaderLen]byte{ - 0x45, 0x01, 0xbe, 0xef, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - wireHeaderFromTradBSDKernel: [ipv4.HeaderLen]byte{ - 0x45, 0x01, 0xef, 0xbe, - 0xca, 0xfe, 0x45, 0xdc, - 0xff, 0x01, 0xde, 0xad, - 172, 16, 254, 254, - 192, 168, 0, 1, - }, - Header: &ipv4.Header{ - Version: ipv4.Version, - Len: ipv4.HeaderLen, - TOS: 1, - TotalLen: 0xbeef, - ID: 0xcafe, - Flags: ipv4.DontFragment, - FragOff: 1500, - TTL: 255, - Protocol: 1, - Checksum: 0xdead, - Src: net.IPv4(172, 16, 254, 254), - Dst: net.IPv4(192, 168, 0, 1), - }, -} - func TestParseIPv4Header(t *testing.T) { - tt := &ipv4HeaderLittleEndianTest - if socket.NativeEndian != binary.LittleEndian { - t.Skip("no test for non-little endian machine yet") - } - - var wh []byte - switch runtime.GOOS { - case "darwin": - wh = tt.wireHeaderFromTradBSDKernel[:] - case "freebsd": - if freebsdVersion >= 1000000 { - wh = tt.wireHeaderFromKernel[:] - } else { - wh = tt.wireHeaderFromTradBSDKernel[:] - } - default: - wh = tt.wireHeaderFromKernel[:] - } - h, err := ParseIPv4Header(wh) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(h, tt.Header) { - t.Fatalf("got %#v; want %#v", h, tt.Header) + switch socket.NativeEndian { + case binary.LittleEndian: + t.Run("LittleEndian", func(t *testing.T) { + // TODO(mikio): Add platform dependent wire + // header formats when we support new + // platforms. 
+ wireHeaderFromKernel := [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xbe, 0xef, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + wireHeaderFromTradBSDKernel := [ipv4.HeaderLen]byte{ + 0x45, 0x01, 0xef, 0xbe, + 0xca, 0xfe, 0x45, 0xdc, + 0xff, 0x01, 0xde, 0xad, + 172, 16, 254, 254, + 192, 168, 0, 1, + } + th := &ipv4.Header{ + Version: ipv4.Version, + Len: ipv4.HeaderLen, + TOS: 1, + TotalLen: 0xbeef, + ID: 0xcafe, + Flags: ipv4.DontFragment, + FragOff: 1500, + TTL: 255, + Protocol: 1, + Checksum: 0xdead, + Src: net.IPv4(172, 16, 254, 254), + Dst: net.IPv4(192, 168, 0, 1), + } + var wh []byte + switch runtime.GOOS { + case "darwin": + wh = wireHeaderFromTradBSDKernel[:] + case "freebsd": + if freebsdVersion >= 1000000 { + wh = wireHeaderFromKernel[:] + } else { + wh = wireHeaderFromTradBSDKernel[:] + } + default: + wh = wireHeaderFromKernel[:] + } + h, err := ParseIPv4Header(wh) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, th) { + t.Fatalf("got %#v; want %#v", h, th) + } + }) } } diff --git a/vendor/golang.org/x/net/icmp/message.go b/vendor/golang.org/x/net/icmp/message.go index 81140b0df..46fe95ab0 100644 --- a/vendor/golang.org/x/net/icmp/message.go +++ b/vendor/golang.org/x/net/icmp/message.go @@ -11,6 +11,7 @@ // ICMP extensions for MPLS are defined in RFC 4950. // ICMP extensions for interface and next-hop identification are // defined in RFC 5837. +// PROBE: A utility for probing interfaces is defined in RFC 8335. 
package icmp // import "golang.org/x/net/icmp" import ( @@ -107,21 +108,25 @@ func (m *Message) Marshal(psh []byte) ([]byte, error) { return b[len(psh):], nil } -var parseFns = map[Type]func(int, []byte) (MessageBody, error){ +var parseFns = map[Type]func(int, Type, []byte) (MessageBody, error){ ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach, ipv4.ICMPTypeTimeExceeded: parseTimeExceeded, ipv4.ICMPTypeParameterProblem: parseParamProb, - ipv4.ICMPTypeEcho: parseEcho, - ipv4.ICMPTypeEchoReply: parseEcho, + ipv4.ICMPTypeEcho: parseEcho, + ipv4.ICMPTypeEchoReply: parseEcho, + ipv4.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest, + ipv4.ICMPTypeExtendedEchoReply: parseExtendedEchoReply, ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach, ipv6.ICMPTypePacketTooBig: parsePacketTooBig, ipv6.ICMPTypeTimeExceeded: parseTimeExceeded, ipv6.ICMPTypeParameterProblem: parseParamProb, - ipv6.ICMPTypeEchoRequest: parseEcho, - ipv6.ICMPTypeEchoReply: parseEcho, + ipv6.ICMPTypeEchoRequest: parseEcho, + ipv6.ICMPTypeEchoReply: parseEcho, + ipv6.ICMPTypeExtendedEchoRequest: parseExtendedEchoRequest, + ipv6.ICMPTypeExtendedEchoReply: parseExtendedEchoReply, } // ParseMessage parses b as an ICMP message. 
@@ -143,7 +148,7 @@ func ParseMessage(proto int, b []byte) (*Message, error) { if fn, ok := parseFns[m.Type]; !ok { m.Body, err = parseDefaultMessageBody(proto, b[4:]) } else { - m.Body, err = fn(proto, b[4:]) + m.Body, err = fn(proto, m.Type, b[4:]) } if err != nil { return nil, err diff --git a/vendor/golang.org/x/net/icmp/message_test.go b/vendor/golang.org/x/net/icmp/message_test.go index 5d2605f8d..c278b8b83 100644 --- a/vendor/golang.org/x/net/icmp/message_test.go +++ b/vendor/golang.org/x/net/icmp/message_test.go @@ -15,120 +15,141 @@ import ( "golang.org/x/net/ipv6" ) -var marshalAndParseMessageForIPv4Tests = []icmp.Message{ - { - Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv4.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv4.ICMPTypeParameterProblem, Code: 2, - Body: &icmp.ParamProb{ - Pointer: 8, - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv4.ICMPTypeEcho, Code: 0, - Body: &icmp.Echo{ - ID: 1, Seq: 2, - Data: []byte("HELLO-R-U-THERE"), - }, - }, - { - Type: ipv4.ICMPTypePhoturis, - Body: &icmp.DefaultMessageBody{ - Data: []byte{0x80, 0x40, 0x20, 0x10}, - }, - }, -} - -func TestMarshalAndParseMessageForIPv4(t *testing.T) { - for i, tt := range marshalAndParseMessageForIPv4Tests { - b, err := tt.Marshal(nil) - if err != nil { - t.Fatal(err) - } - m, err := icmp.ParseMessage(iana.ProtocolICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - if !reflect.DeepEqual(m.Body, tt.Body) { - t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) - } - } -} - -var marshalAndParseMessageForIPv6Tests = []icmp.Message{ - { - Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypePacketTooBig, 
Code: 0, - Body: &icmp.PacketTooBig{ - MTU: 1<<16 - 1, - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypeParameterProblem, Code: 2, - Body: &icmp.ParamProb{ - Pointer: 8, - Data: []byte("ERROR-INVOKING-PACKET"), - }, - }, - { - Type: ipv6.ICMPTypeEchoRequest, Code: 0, - Body: &icmp.Echo{ - ID: 1, Seq: 2, - Data: []byte("HELLO-R-U-THERE"), - }, - }, - { - Type: ipv6.ICMPTypeDuplicateAddressConfirmation, - Body: &icmp.DefaultMessageBody{ - Data: []byte{0x80, 0x40, 0x20, 0x10}, - }, - }, -} - -func TestMarshalAndParseMessageForIPv6(t *testing.T) { - pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) - for i, tt := range marshalAndParseMessageForIPv6Tests { - for _, psh := range [][]byte{pshicmp, nil} { - b, err := tt.Marshal(psh) - if err != nil { - t.Fatal(err) +func TestMarshalAndParseMessage(t *testing.T) { + fn := func(t *testing.T, proto int, tms []icmp.Message) { + var pshs [][]byte + switch proto { + case iana.ProtocolICMP: + pshs = [][]byte{nil} + case iana.ProtocolIPv6ICMP: + pshs = [][]byte{ + icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")), + nil, } - m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - if !reflect.DeepEqual(m.Body, tt.Body) { - t.Errorf("#%v: got %v; want %v", i, m.Body, tt.Body) + } + for i, tm := range tms { + for _, psh := range pshs { + b, err := tm.Marshal(psh) + if err != nil { + t.Fatal(err) + } + m, err := icmp.ParseMessage(proto, b) + if err != nil { + t.Fatal(err) + } + if m.Type != tm.Type || m.Code != tm.Code { + t.Errorf("#%d: got %#v; want %#v", i, m, &tm) + } + if !reflect.DeepEqual(m.Body, tm.Body) { + t.Errorf("#%d: got %#v; want %#v", i, m.Body, tm.Body) + } } } } + + t.Run("IPv4", 
func(t *testing.T) { + fn(t, iana.ProtocolICMP, + []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv4.ICMPTypeEcho, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoReply, Code: 0, + Body: &icmp.ExtendedEchoReply{ + State: 4 /* Delay */, Active: true, IPv4: true, + }, + }, + { + Type: ipv4.ICMPTypePhoturis, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, + }) + }) + t.Run("IPv6", func(t *testing.T) { + fn(t, iana.ProtocolIPv6ICMP, + []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypePacketTooBig, Code: 0, + Body: &icmp.PacketTooBig{ + MTU: 1<<16 - 1, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + }, + }, + { + Type: ipv6.ICMPTypeEchoRequest, Code: 0, + Body: &icmp.Echo{ + ID: 1, Seq: 2, + Data: []byte("HELLO-R-U-THERE"), + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoReply, Code: 0, + Body: &icmp.ExtendedEchoReply{ + State: 5 /* Probe */, Active: true, IPv6: 
true, + }, + }, + { + Type: ipv6.ICMPTypeDuplicateAddressConfirmation, + Body: &icmp.DefaultMessageBody{ + Data: []byte{0x80, 0x40, 0x20, 0x10}, + }, + }, + }) + }) } diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go index f27135660..9ebbbafe9 100644 --- a/vendor/golang.org/x/net/icmp/multipart.go +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -10,12 +10,14 @@ import "golang.org/x/net/internal/iana" // exts as extensions, and returns a required length for message body // and a required length for a padded original datagram in wire // format. -func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) { +func multipartMessageBodyDataLen(proto int, withOrigDgram bool, b []byte, exts []Extension) (bodyLen, dataLen int) { for _, ext := range exts { bodyLen += ext.Len(proto) } if bodyLen > 0 { - dataLen = multipartMessageOrigDatagramLen(proto, b) + if withOrigDgram { + dataLen = multipartMessageOrigDatagramLen(proto, b) + } bodyLen += 4 // length of extension header } else { dataLen = len(b) @@ -50,8 +52,8 @@ func multipartMessageOrigDatagramLen(proto int, b []byte) int { // marshalMultipartMessageBody takes data as an original datagram and // exts as extesnsions, and returns a binary encoding of message body. // It can be used for non-multipart message bodies when exts is nil. 
-func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) { - bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts) +func marshalMultipartMessageBody(proto int, withOrigDgram bool, data []byte, exts []Extension) ([]byte, error) { + bodyLen, dataLen := multipartMessageBodyDataLen(proto, withOrigDgram, data, exts) b := make([]byte, 4+bodyLen) copy(b[4:], data) off := dataLen + 4 @@ -71,16 +73,23 @@ func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]by return nil, err } off += ext.Len(proto) + case *InterfaceIdent: + if err := ext.marshal(proto, b[off:]); err != nil { + return nil, err + } + off += ext.Len(proto) } } s := checksum(b[dataLen+4:]) b[dataLen+4+2] ^= byte(s) b[dataLen+4+3] ^= byte(s >> 8) - switch proto { - case iana.ProtocolICMP: - b[1] = byte(dataLen / 4) - case iana.ProtocolIPv6ICMP: - b[0] = byte(dataLen / 8) + if withOrigDgram { + switch proto { + case iana.ProtocolICMP: + b[1] = byte(dataLen / 4) + case iana.ProtocolIPv6ICMP: + b[0] = byte(dataLen / 8) + } } } return b, nil @@ -88,7 +97,7 @@ func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]by // parseMultipartMessageBody parses b as either a non-multipart // message body or a multipart message body. 
-func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) { +func parseMultipartMessageBody(proto int, typ Type, b []byte) ([]byte, []Extension, error) { var l int switch proto { case iana.ProtocolICMP: @@ -99,11 +108,14 @@ func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) if len(b) == 4 { return nil, nil, nil } - exts, l, err := parseExtensions(b[4:], l) + exts, l, err := parseExtensions(typ, b[4:], l) if err != nil { l = len(b) - 4 } - data := make([]byte, l) - copy(data, b[4:]) + var data []byte + if l > 0 { + data = make([]byte, l) + copy(data, b[4:]) + } return data, exts, nil } diff --git a/vendor/golang.org/x/net/icmp/multipart_test.go b/vendor/golang.org/x/net/icmp/multipart_test.go index 966ccb8da..74408827b 100644 --- a/vendor/golang.org/x/net/icmp/multipart_test.go +++ b/vendor/golang.org/x/net/icmp/multipart_test.go @@ -5,6 +5,7 @@ package icmp_test import ( + "errors" "fmt" "net" "reflect" @@ -16,425 +17,557 @@ import ( "golang.org/x/net/ipv6" ) -var marshalAndParseMultipartMessageForIPv4Tests = []icmp.Message{ - { - Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 1).To4(), - }, - }, - }, - }, - }, - { - Type: ipv4.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 1).To4(), - }, - }, - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - 
Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - }, - }, - }, - { - Type: ipv4.ICMPTypeParameterProblem, Code: 2, - Body: &icmp.ParamProb{ - Pointer: 8, - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, - }, - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 1).To4(), - }, - }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x2f, - Interface: &net.Interface{ - Index: 16, - Name: "en102", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.IPv4(192, 168, 0, 2).To4(), - }, - }, - }, - }, - }, -} - -func TestMarshalAndParseMultipartMessageForIPv4(t *testing.T) { - for i, tt := range marshalAndParseMultipartMessageForIPv4Tests { - b, err := tt.Marshal(nil) +func TestMarshalAndParseMultipartMessage(t *testing.T) { + fn := func(t *testing.T, proto int, tm icmp.Message) error { + b, err := tm.Marshal(nil) if err != nil { - t.Fatal(err) + return err } - if b[5] != 32 { - t.Errorf("#%v: got %v; want 32", i, b[5]) + switch tm.Type { + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + default: + switch proto { + case iana.ProtocolICMP: + if b[5] != 32 { + return fmt.Errorf("got %d; want 32", b[5]) + } + case iana.ProtocolIPv6ICMP: + if b[4] != 16 { + return fmt.Errorf("got %d; want 16", b[4]) + } + default: + return fmt.Errorf("unknown protocol: %d", proto) + } } - m, err := icmp.ParseMessage(iana.ProtocolICMP, b) + m, err := icmp.ParseMessage(proto, b) if err != nil { - t.Fatal(err) + return err } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) + if m.Type != tm.Type || m.Code != tm.Code { + return fmt.Errorf("got %v; want %v", m, &tm) } switch m.Type { - case 
ipv4.ICMPTypeDestinationUnreachable: - got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) + case ipv4.ICMPTypeExtendedEchoRequest, ipv6.ICMPTypeExtendedEchoRequest: + got, want := m.Body.(*icmp.ExtendedEchoRequest), tm.Body.(*icmp.ExtendedEchoRequest) if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + case ipv4.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tm.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) } if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + return fmt.Errorf("got %d; want 128", len(got.Data)) } case ipv4.ICMPTypeTimeExceeded: - got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) + got, want := m.Body.(*icmp.TimeExceeded), tm.Body.(*icmp.TimeExceeded) if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) } if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + return fmt.Errorf("got %d; want 128", len(got.Data)) } case ipv4.ICMPTypeParameterProblem: - got, want := m.Body.(*icmp.ParamProb), tt.Body.(*icmp.ParamProb) + got, want := m.Body.(*icmp.ParamProb), tm.Body.(*icmp.ParamProb) if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) } if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) + return fmt.Errorf("got %d; want 128", len(got.Data)) } + case ipv6.ICMPTypeDestinationUnreachable: + got, want := m.Body.(*icmp.DstUnreach), tm.Body.(*icmp.DstUnreach) + if !reflect.DeepEqual(got.Extensions, 
want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + case ipv6.ICMPTypeTimeExceeded: + got, want := m.Body.(*icmp.TimeExceeded), tm.Body.(*icmp.TimeExceeded) + if !reflect.DeepEqual(got.Extensions, want.Extensions) { + return errors.New(dumpExtensions(got.Extensions, want.Extensions)) + } + if len(got.Data) != 128 { + return fmt.Errorf("got %d; want 128", len(got.Data)) + } + default: + return fmt.Errorf("unknown message type: %v", m.Type) } + return nil } -} -var marshalAndParseMultipartMessageForIPv6Tests = []icmp.Message{ - { - Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, - Body: &icmp.DstUnreach{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, + t.Run("IPv4", func(t *testing.T) { + for i, tm := range []icmp.Message{ + { + Type: ipv4.ICMPTypeDestinationUnreachable, Code: 15, + Body: &icmp.DstUnreach{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, }, }, }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en101", - }, - }, }, - }, - }, - { - Type: ipv6.ICMPTypeTimeExceeded, Code: 1, - Body: &icmp.TimeExceeded{ - Data: []byte("ERROR-INVOKING-PACKET"), - Extensions: []icmp.Extension{ - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x0f, - Interface: &net.Interface{ - Index: 15, - Name: "en101", - 
MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en101", - }, - }, - &icmp.MPLSLabelStack{ - Class: 1, - Type: 1, - Labels: []icmp.MPLSLabel{ - { - Label: 16014, - TC: 0x4, - S: true, - TTL: 255, + { + Type: ipv4.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, }, }, }, - &icmp.InterfaceInfo{ - Class: 2, - Type: 0x2f, - Interface: &net.Interface{ - Index: 16, - Name: "en102", - MTU: 8192, - }, - Addr: &net.IPAddr{ - IP: net.ParseIP("fe80::1"), - Zone: "en102", + }, + { + Type: ipv4.ICMPTypeParameterProblem, Code: 2, + Body: &icmp.ParamProb{ + Pointer: 8, + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 1).To4(), + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.IPv4(192, 168, 0, 2).To4(), + }, + }, }, }, }, - }, - }, -} - -func TestMarshalAndParseMultipartMessageForIPv6(t *testing.T) { - pshicmp := icmp.IPv6PseudoHeader(net.ParseIP("fe80::1"), net.ParseIP("ff02::1")) - for i, tt := range marshalAndParseMultipartMessageForIPv6Tests { - for _, psh := range [][]byte{pshicmp, nil} { - b, err := tt.Marshal(psh) - if err != nil { - t.Fatal(err) - } - if b[4] != 16 { - t.Errorf("#%v: 
got %v; want 16", i, b[4]) - } - m, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, b) - if err != nil { - t.Fatal(err) - } - if m.Type != tt.Type || m.Code != tt.Code { - t.Errorf("#%v: got %v; want %v", i, m, &tt) - } - switch m.Type { - case ipv6.ICMPTypeDestinationUnreachable: - got, want := m.Body.(*icmp.DstUnreach), tt.Body.(*icmp.DstUnreach) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } - case ipv6.ICMPTypeTimeExceeded: - got, want := m.Body.(*icmp.TimeExceeded), tt.Body.(*icmp.TimeExceeded) - if !reflect.DeepEqual(got.Extensions, want.Extensions) { - t.Error(dumpExtensions(i, got.Extensions, want.Extensions)) - } - if len(got.Data) != 128 { - t.Errorf("#%v: got %v; want 128", i, len(got.Data)) - } + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 2, + Index: 911, + }, + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + }, + }, + }, + { + Type: ipv4.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 3, + AFI: iana.AddrFamily48bitMAC, + Addr: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab}, + }, + }, + }, + }, + } { + if err := fn(t, iana.ProtocolICMP, tm); err != nil { + t.Errorf("#%d: %v", i, err) } } - } + }) + t.Run("IPv6", func(t *testing.T) { + for i, tm := range []icmp.Message{ + { + Type: ipv6.ICMPTypeDestinationUnreachable, Code: 6, + Body: &icmp.DstUnreach{ + Data: 
[]byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeTimeExceeded, Code: 1, + Body: &icmp.TimeExceeded{ + Data: []byte("ERROR-INVOKING-PACKET"), + Extensions: []icmp.Extension{ + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x0f, + Interface: &net.Interface{ + Index: 15, + Name: "en101", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en101", + }, + }, + &icmp.MPLSLabelStack{ + Class: 1, + Type: 1, + Labels: []icmp.MPLSLabel{ + { + Label: 16014, + TC: 0x4, + S: true, + TTL: 255, + }, + }, + }, + &icmp.InterfaceInfo{ + Class: 2, + Type: 0x2f, + Interface: &net.Interface{ + Index: 16, + Name: "en102", + MTU: 8192, + }, + Addr: &net.IPAddr{ + IP: net.ParseIP("fe80::1"), + Zone: "en102", + }, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, Local: true, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 1, + Name: "en101", + }, + &icmp.InterfaceIdent{ + Class: 3, + Type: 2, + Index: 911, + }, + }, + }, + }, + { + Type: ipv6.ICMPTypeExtendedEchoRequest, Code: 0, + Body: &icmp.ExtendedEchoRequest{ + ID: 1, Seq: 2, + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Class: 3, + Type: 3, + AFI: iana.AddrFamilyIPv4, + Addr: []byte{192, 0, 2, 1}, + }, + }, + }, + }, + } { + if err := fn(t, iana.ProtocolIPv6ICMP, tm); 
err != nil { + t.Errorf("#%d: %v", i, err) + } + } + }) } -func dumpExtensions(i int, gotExts, wantExts []icmp.Extension) string { +func dumpExtensions(gotExts, wantExts []icmp.Extension) string { var s string - for j, got := range gotExts { + for i, got := range gotExts { switch got := got.(type) { case *icmp.MPLSLabelStack: - want := wantExts[j].(*icmp.MPLSLabelStack) + want := wantExts[i].(*icmp.MPLSLabelStack) if !reflect.DeepEqual(got, want) { - s += fmt.Sprintf("#%v/%v: got %#v; want %#v\n", i, j, got, want) + s += fmt.Sprintf("#%d: got %#v; want %#v\n", i, got, want) } case *icmp.InterfaceInfo: - want := wantExts[j].(*icmp.InterfaceInfo) + want := wantExts[i].(*icmp.InterfaceInfo) if !reflect.DeepEqual(got, want) { - s += fmt.Sprintf("#%v/%v: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, j, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + s += fmt.Sprintf("#%d: got %#v, %#v, %#v; want %#v, %#v, %#v\n", i, got, got.Interface, got.Addr, want, want.Interface, want.Addr) + } + case *icmp.InterfaceIdent: + want := wantExts[i].(*icmp.InterfaceIdent) + if !reflect.DeepEqual(got, want) { + s += fmt.Sprintf("#%d: got %#v; want %#v\n", i, got, want) } } } + if len(s) == 0 { + return "" + } return s[:len(s)-1] } -var multipartMessageBodyLenTests = []struct { - proto int - in icmp.MessageBody - out int -}{ - { - iana.ProtocolICMP, - &icmp.DstUnreach{ - Data: make([]byte, ipv4.HeaderLen), - }, - 4 + ipv4.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolICMP, - &icmp.TimeExceeded{ - Data: make([]byte, ipv4.HeaderLen), - }, - 4 + ipv4.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, ipv4.HeaderLen), - }, - 4 + ipv4.HeaderLen, // [pointer, unused] and original datagram - }, - - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, ipv4.HeaderLen), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], 
extension header, object header, object payload, original datagram - }, - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, 128), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram - }, - { - iana.ProtocolICMP, - &icmp.ParamProb{ - Data: make([]byte, 129), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram - }, - - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.PacketTooBig{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // mtu and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.TimeExceeded{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // unused and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.ParamProb{ - Data: make([]byte, ipv6.HeaderLen), - }, - 4 + ipv6.HeaderLen, // pointer and original datagram - }, - - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, 127), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, 128), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram - }, - { - iana.ProtocolIPv6ICMP, - &icmp.DstUnreach{ - Data: make([]byte, 129), - Extensions: []icmp.Extension{ - &icmp.MPLSLabelStack{}, - }, - }, - 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram 
- }, -} - func TestMultipartMessageBodyLen(t *testing.T) { - for i, tt := range multipartMessageBodyLenTests { + for i, tt := range []struct { + proto int + in icmp.MessageBody + out int + }{ + { + iana.ProtocolICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + }, + 4 + ipv4.HeaderLen, // [pointer, unused] and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv4.HeaderLen), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload, original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolICMP, + &icmp.ParamProb{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 132, // [pointer, length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.PacketTooBig{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // mtu and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.TimeExceeded{ + Data: make([]byte, ipv6.HeaderLen), + }, + 4 + ipv6.HeaderLen, // unused and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ParamProb{ + Data: make([]byte, ipv6.HeaderLen), + 
}, + 4 + ipv6.HeaderLen, // pointer and original datagram + }, + + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 127), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 128), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 128, // [length, unused], extension header, object header, object payload and original datagram + }, + { + iana.ProtocolIPv6ICMP, + &icmp.DstUnreach{ + Data: make([]byte, 129), + Extensions: []icmp.Extension{ + &icmp.MPLSLabelStack{}, + }, + }, + 4 + 4 + 4 + 0 + 136, // [length, unused], extension header, object header, object payload and original datagram + }, + + { + iana.ProtocolICMP, + &icmp.ExtendedEchoRequest{}, + 4, // [id, seq, l-bit] + }, + { + iana.ProtocolICMP, + &icmp.ExtendedEchoRequest{ + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{}, + }, + }, + 4 + 4 + 4, // [id, seq, l-bit], extension header, object header + }, + { + iana.ProtocolIPv6ICMP, + &icmp.ExtendedEchoRequest{ + Extensions: []icmp.Extension{ + &icmp.InterfaceIdent{ + Type: 3, + AFI: iana.AddrFamilyNSAP, + Addr: []byte{0x49, 0x00, 0x01, 0xaa, 0xaa, 0xbb, 0xbb, 0xcc, 0xcc, 0x00}, + }, + }, + }, + 4 + 4 + 4 + 16, // [id, seq, l-bit], extension header, object header, object payload + }, + } { if out := tt.in.Len(tt.proto); out != tt.out { t.Errorf("#%d: got %d; want %d", i, out, tt.out) } diff --git a/vendor/golang.org/x/net/icmp/packettoobig.go b/vendor/golang.org/x/net/icmp/packettoobig.go index a1c9df7bf..afbf24f1b 100644 --- a/vendor/golang.org/x/net/icmp/packettoobig.go +++ b/vendor/golang.org/x/net/icmp/packettoobig.go @@ -29,7 +29,7 @@ func (p *PacketTooBig) Marshal(proto int) ([]byte, error) { } // parsePacketTooBig parses b as an ICMP packet too big message body. 
-func parsePacketTooBig(proto int, b []byte) (MessageBody, error) { +func parsePacketTooBig(proto int, _ Type, b []byte) (MessageBody, error) { bodyLen := len(b) if bodyLen < 4 { return nil, errMessageTooShort diff --git a/vendor/golang.org/x/net/icmp/paramprob.go b/vendor/golang.org/x/net/icmp/paramprob.go index 0a2548daa..85872554f 100644 --- a/vendor/golang.org/x/net/icmp/paramprob.go +++ b/vendor/golang.org/x/net/icmp/paramprob.go @@ -21,7 +21,7 @@ func (p *ParamProb) Len(proto int) int { if p == nil { return 0 } - l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) return 4 + l } @@ -33,7 +33,7 @@ func (p *ParamProb) Marshal(proto int) ([]byte, error) { copy(b[4:], p.Data) return b, nil } - b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions) + b, err := marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) if err != nil { return nil, err } @@ -42,7 +42,7 @@ func (p *ParamProb) Marshal(proto int) ([]byte, error) { } // parseParamProb parses b as an ICMP parameter problem message body. -func parseParamProb(proto int, b []byte) (MessageBody, error) { +func parseParamProb(proto int, typ Type, b []byte) (MessageBody, error) { if len(b) < 4 { return nil, errMessageTooShort } @@ -55,7 +55,7 @@ func parseParamProb(proto int, b []byte) (MessageBody, error) { } p.Pointer = uintptr(b[0]) var err error - p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/icmp/ping_test.go b/vendor/golang.org/x/net/icmp/ping_test.go deleted file mode 100644 index 3171dad11..000000000 --- a/vendor/golang.org/x/net/icmp/ping_test.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package icmp_test - -import ( - "errors" - "fmt" - "net" - "os" - "runtime" - "sync" - "testing" - "time" - - "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" - "golang.org/x/net/internal/nettest" - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -func googleAddr(c *icmp.PacketConn, protocol int) (net.Addr, error) { - const host = "www.google.com" - ips, err := net.LookupIP(host) - if err != nil { - return nil, err - } - netaddr := func(ip net.IP) (net.Addr, error) { - switch c.LocalAddr().(type) { - case *net.UDPAddr: - return &net.UDPAddr{IP: ip}, nil - case *net.IPAddr: - return &net.IPAddr{IP: ip}, nil - default: - return nil, errors.New("neither UDPAddr nor IPAddr") - } - } - for _, ip := range ips { - switch protocol { - case iana.ProtocolICMP: - if ip.To4() != nil { - return netaddr(ip) - } - case iana.ProtocolIPv6ICMP: - if ip.To16() != nil && ip.To4() == nil { - return netaddr(ip) - } - } - } - return nil, errors.New("no A or AAAA record") -} - -type pingTest struct { - network, address string - protocol int - mtype icmp.Type -} - -var nonPrivilegedPingTests = []pingTest{ - {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, - - {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, -} - -func TestNonPrivilegedPing(t *testing.T) { - if testing.Short() { - t.Skip("avoid external network") - } - switch runtime.GOOS { - case "darwin": - case "linux": - t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") - default: - t.Skipf("not supported on %s", runtime.GOOS) - } - - for i, tt := range nonPrivilegedPingTests { - if err := doPing(tt, i); err != nil { - t.Error(err) - } - } -} - -var privilegedPingTests = []pingTest{ - {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, - - {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, -} - -func TestPrivilegedPing(t *testing.T) { - if testing.Short() { - t.Skip("avoid external network") - } - if m, ok := 
nettest.SupportsRawIPSocket(); !ok { - t.Skip(m) - } - - for i, tt := range privilegedPingTests { - if err := doPing(tt, i); err != nil { - t.Error(err) - } - } -} - -func doPing(tt pingTest, seq int) error { - c, err := icmp.ListenPacket(tt.network, tt.address) - if err != nil { - return err - } - defer c.Close() - - dst, err := googleAddr(c, tt.protocol) - if err != nil { - return err - } - - if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { - var f ipv6.ICMPFilter - f.SetAll(true) - f.Accept(ipv6.ICMPTypeDestinationUnreachable) - f.Accept(ipv6.ICMPTypePacketTooBig) - f.Accept(ipv6.ICMPTypeTimeExceeded) - f.Accept(ipv6.ICMPTypeParameterProblem) - f.Accept(ipv6.ICMPTypeEchoReply) - if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { - return err - } - } - - wm := icmp.Message{ - Type: tt.mtype, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), - Data: []byte("HELLO-R-U-THERE"), - }, - } - wb, err := wm.Marshal(nil) - if err != nil { - return err - } - if n, err := c.WriteTo(wb, dst); err != nil { - return err - } else if n != len(wb) { - return fmt.Errorf("got %v; want %v", n, len(wb)) - } - - rb := make([]byte, 1500) - if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { - return err - } - n, peer, err := c.ReadFrom(rb) - if err != nil { - return err - } - rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) - if err != nil { - return err - } - switch rm.Type { - case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: - return nil - default: - return fmt.Errorf("got %+v from %v; want echo reply", rm, peer) - } -} - -func TestConcurrentNonPrivilegedListenPacket(t *testing.T) { - if testing.Short() { - t.Skip("avoid external network") - } - switch runtime.GOOS { - case "darwin": - case "linux": - t.Log("you may need to adjust the net.ipv4.ping_group_range kernel state") - default: - t.Skipf("not supported on %s", runtime.GOOS) - } - - network, address := "udp4", "127.0.0.1" - if 
!nettest.SupportsIPv4() { - network, address = "udp6", "::1" - } - const N = 1000 - var wg sync.WaitGroup - wg.Add(N) - for i := 0; i < N; i++ { - go func() { - defer wg.Done() - c, err := icmp.ListenPacket(network, address) - if err != nil { - t.Error(err) - return - } - c.Close() - }() - } - wg.Wait() -} diff --git a/vendor/golang.org/x/net/icmp/timeexceeded.go b/vendor/golang.org/x/net/icmp/timeexceeded.go index 344e15848..14e9e23cc 100644 --- a/vendor/golang.org/x/net/icmp/timeexceeded.go +++ b/vendor/golang.org/x/net/icmp/timeexceeded.go @@ -15,23 +15,23 @@ func (p *TimeExceeded) Len(proto int) int { if p == nil { return 0 } - l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions) + l, _ := multipartMessageBodyDataLen(proto, true, p.Data, p.Extensions) return 4 + l } // Marshal implements the Marshal method of MessageBody interface. func (p *TimeExceeded) Marshal(proto int) ([]byte, error) { - return marshalMultipartMessageBody(proto, p.Data, p.Extensions) + return marshalMultipartMessageBody(proto, true, p.Data, p.Extensions) } // parseTimeExceeded parses b as an ICMP time exceeded message body. -func parseTimeExceeded(proto int, b []byte) (MessageBody, error) { +func parseTimeExceeded(proto int, typ Type, b []byte) (MessageBody, error) { if len(b) < 4 { return nil, errMessageTooShort } p := &TimeExceeded{} var err error - p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b) + p.Data, p.Extensions, err = parseMultipartMessageBody(proto, typ, b) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go index c9df24d95..826633e1b 100644 --- a/vendor/golang.org/x/net/internal/iana/const.go +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -1,5 +1,5 @@ // go generate gen.go -// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). package iana // import "golang.org/x/net/internal/iana" @@ -38,7 +38,7 @@ const ( CongestionExperienced = 0x3 // CE (Congestion Experienced) ) -// Protocol Numbers, Updated: 2016-06-22 +// Protocol Numbers, Updated: 2017-10-13 const ( ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option @@ -178,3 +178,50 @@ const ( ProtocolROHC = 142 // Robust Header Compression ProtocolReserved = 255 // Reserved ) + +// Address Family Numbers, Updated: 2016-10-25 +const ( + AddrFamilyIPv4 = 1 // IP (IP version 4) + AddrFamilyIPv6 = 2 // IP6 (IP version 6) + AddrFamilyNSAP = 3 // NSAP + AddrFamilyHDLC = 4 // HDLC (8-bit multidrop) + AddrFamilyBBN1822 = 5 // BBN 1822 + AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format") + AddrFamilyE163 = 7 // E.163 + AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM) + AddrFamilyF69 = 9 // F.69 (Telex) + AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay) + AddrFamilyIPX = 11 // IPX + AddrFamilyAppletalk = 12 // Appletalk + AddrFamilyDecnetIV = 13 // Decnet IV + AddrFamilyBanyanVines = 14 // Banyan Vines + AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress + AddrFamilyDNS = 16 // DNS (Domain Name System) + AddrFamilyDistinguishedName = 17 // Distinguished Name + AddrFamilyASNumber = 18 // AS Number + AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4 + AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6 + AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP + AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name + AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name + AddrFamilyGWID = 24 // GWID + AddrFamilyL2VPN = 25 // AFI for L2VPN information + AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier + AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP 
Endpoint Identifier + AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier + AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4 + AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6 + AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family + AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family + AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family + AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF) + AddrFamilyBGPLS = 16388 // BGP-LS + AddrFamily48bitMAC = 16389 // 48-bit MAC + AddrFamily64bitMAC = 16390 // 64-bit MAC + AddrFamilyOUI = 16391 // OUI + AddrFamilyMACFinal24bits = 16392 // MAC/24 + AddrFamilyMACFinal40bits = 16393 // MAC/40 + AddrFamilyIPv6Initial64bits = 16394 // IPv6/64 + AddrFamilyRBridgePortID = 16395 // RBridge Port ID + AddrFamilyTRILLNickname = 16396 // TRILL Nickname +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go index 2a5c310c2..2227e09e8 100644 --- a/vendor/golang.org/x/net/internal/iana/gen.go +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -39,12 +39,16 @@ var registries = []struct { "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", parseProtocolNumbers, }, + { + "http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml", + parseAddrFamilyNumbers, + }, } func main() { var bb bytes.Buffer fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") for _, r := range registries { @@ -291,3 +295,93 @@ func (pn *protocolNumbers) escape() 
[]canonProtocolRecord { } return prs } + +func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var afn addrFamilylNumbers + if err := dec.Decode(&afn); err != nil { + return err + } + afrs := afn.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated) + fmt.Fprintf(w, "const (\n") + for _, afr := range afrs { + if afr.Name == "" { + continue + } + fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value) + fmt.Fprintf(w, "// %s\n", afr.Descr) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type addrFamilylNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonAddrFamilyRecord struct { + Name string + Descr string + Value int +} + +func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord { + afrs := make([]canonAddrFamilyRecord, len(afn.Records)) + sr := strings.NewReplacer( + "IP version 4", "IPv4", + "IP version 6", "IPv6", + "Identifier", "ID", + "-", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, afr := range afn.Records { + if strings.Contains(afr.Descr, "Unassigned") || + strings.Contains(afr.Descr, "Reserved") { + continue + } + afrs[i].Descr = afr.Descr + s := strings.TrimSpace(afr.Descr) + switch s { + case "IP (IP version 4)": + afrs[i].Name = "IPv4" + case "IP6 (IP version 6)": + afrs[i].Name = "IPv6" + case "AFI for L2VPN information": + afrs[i].Name = "L2VPN" + case "E.164 with NSAP format subaddress": + afrs[i].Name = "E164withSubaddress" + case "MT IP: Multi-Topology IP version 4": + afrs[i].Name = "MTIPv4" + case "MAC/24": + afrs[i].Name = "MACFinal24bits" + case "MAC/40": + afrs[i].Name = "MACFinal40bits" + case "IPv6/64": + afrs[i].Name = "IPv6Initial64bits" + default: + n := strings.Index(s, "(") + if n > 0 { + s 
= s[:n] + } + n = strings.Index(s, ":") + if n > 0 { + s = s[:n] + } + afrs[i].Name = sr.Replace(s) + } + afrs[i].Value, _ = strconv.Atoi(afr.Value) + } + return afrs +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go index 206ea2d11..db60491fe 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -26,6 +26,11 @@ type msghdr struct { Flags int32 } +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + type cmsghdr struct { Len uint32 Level int32 @@ -52,6 +57,7 @@ type sockaddrInet6 struct { const ( sizeofIovec = 0x8 sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 sizeofCmsghdr = 0xc sizeofSockaddrInet = 0x10 diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go index 9d490fac9..1bb1737f6 100644 --- a/vendor/golang.org/x/net/ipv4/gen.go +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -80,7 +80,7 @@ var registries = []struct { func geniana() error { var bb bytes.Buffer fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") fmt.Fprintf(&bb, "package ipv4\n\n") for _, r := range registries { resp, err := http.Get(r.url) diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go index be10c9488..4375b4099 100644 --- a/vendor/golang.org/x/net/ipv4/iana.go +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -1,9 +1,9 @@ // go generate gen.go -// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
package ipv4 -// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 const ( ICMPTypeEchoReply ICMPType = 0 // Echo Reply ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable @@ -16,9 +16,11 @@ const ( ICMPTypeTimestamp ICMPType = 13 // Timestamp ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply ICMPTypePhoturis ICMPType = 40 // Photuris + ICMPTypeExtendedEchoRequest ICMPType = 42 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 43 // Extended Echo Reply ) -// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19 +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 var icmpTypes = map[ICMPType]string{ 0: "echo reply", 3: "destination unreachable", @@ -31,4 +33,6 @@ var icmpTypes = map[ICMPType]string{ 13: "timestamp", 14: "timestamp reply", 40: "photuris", + 42: "extended echo request", + 43: "extended echo reply", } diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go index 47b7e9f0a..5885664fb 100644 --- a/vendor/golang.org/x/net/ipv6/gen.go +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -80,7 +80,7 @@ var registries = []struct { func geniana() error { var bb bytes.Buffer fmt.Fprintf(&bb, "// go generate gen.go\n") - fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") fmt.Fprintf(&bb, "package ipv6\n\n") for _, r := range registries { resp, err := http.Get(r.url) diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go index 3c6214fb6..32db1aa94 100644 --- a/vendor/golang.org/x/net/ipv6/iana.go +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -1,9 +1,9 @@ // go generate gen.go -// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT +// Code generated by the command above; DO NOT EDIT. 
package ipv6 -// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 const ( ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big @@ -40,9 +40,11 @@ const ( ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply ) -// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 var icmpTypes = map[ICMPType]string{ 1: "destination unreachable", 2: "packet too big", @@ -79,4 +81,6 @@ var icmpTypes = map[ICMPType]string{ 157: "duplicate address request", 158: "duplicate address confirmation", 159: "mpl control message", + 160: "extended echo request", + 161: "extended echo reply", } diff --git a/vendor/golang.org/x/sync/CONTRIBUTING.md b/vendor/golang.org/x/sync/CONTRIBUTING.md index 88dff59bc..d0485e887 100644 --- a/vendor/golang.org/x/sync/CONTRIBUTING.md +++ b/vendor/golang.org/x/sync/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. 
What did you expect to see? +5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. @@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/sys/CONTRIBUTING.md b/vendor/golang.org/x/sys/CONTRIBUTING.md index 88dff59bc..d0485e887 100644 --- a/vendor/golang.org/x/sys/CONTRIBUTING.md +++ b/vendor/golang.org/x/sys/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. 
@@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 5046cfe7f..163254cee 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -11,11 +11,14 @@ // system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. +// // The primary use of this package is inside other packages that provide a more // portable interface to the system, such as "os", "time" and "net". Use // those packages rather than this one if you can. +// // For details of the functions and data types in this package consult // the manuals for the appropriate operating system. +// // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.ErrorString. diff --git a/vendor/golang.org/x/sys/unix/example_test.go b/vendor/golang.org/x/sys/unix/example_test.go new file mode 100644 index 000000000..10619afdd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/example_test.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix_test + +import ( + "log" + "os" + + "golang.org/x/sys/unix" +) + +func ExampleExec() { + err := unix.Exec("/bin/ls", []string{"ls", "-al"}, os.Environ()) + log.Fatal(err) +} diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 857d2a42d..ef35fce80 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -11,24 +11,27 @@ // system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. +// // The primary use of this package is inside other packages that provide a more // portable interface to the system, such as "os", "time" and "net". Use // those packages rather than this one if you can. +// // For details of the functions and data types in this package consult // the manuals for the appropriate operating system. +// // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. package unix // import "golang.org/x/sys/unix" +import "strings" + // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any // location, it returns (nil, EINVAL). 
func ByteSliceFromString(s string) ([]byte, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, EINVAL - } + if strings.IndexByte(s, 0) != -1 { + return nil, EINVAL } a := make([]byte, len(s)+1) copy(a, s) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index b9598694c..006e21f5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -330,6 +330,7 @@ func Uname(uname *Utsname) error { //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 777860bf0..b5072de28 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -251,10 +251,12 @@ func Uname(uname *Utsname) error { //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 89f2c3fc1..ba9df4ac1 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -12,7 +12,10 @@ package unix -import "unsafe" +import ( + "strings" + "unsafe" +) // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { @@ -134,14 +137,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { // Derive extattr namespace and attribute name func xattrnamespace(fullattr string) (ns int, attr string, err error) { - s := -1 - for idx, val := range fullattr { - if val == '.' { - s = idx - break - } - } - + s := strings.IndexByte(fullattr, '.') if s == -1 { return -1, "", ENOATTR } @@ -482,6 +478,7 @@ func Uname(uname *Utsname) error { //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index c464783d8..a1e8a609b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -7,6 +7,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 15a69cbdd..090ed404a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -9,6 +9,7 @@ package unix //sys Dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT //sys Fstatfs(fd int, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 40b8e4f0f..3d5817f66 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -15,6 +15,7 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) //sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 //sysnb Getegid() (egid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 17c9116e8..6fb8733d6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -9,6 +9,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go 
index a00f99279..78c1e0df1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -7,6 +7,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go index ff7ad82b1..a2bc44015 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -7,44 +7,15 @@ package unix_test import ( - "io/ioutil" "os" "runtime" + "runtime/debug" "testing" "time" "golang.org/x/sys/unix" ) -func TestFchmodat(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - err := os.Symlink("file1", "symlink1") - if err != nil { - t.Fatal(err) - } - - err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0) - if err != nil { - t.Fatalf("Fchmodat: unexpected error: %v", err) - } - - fi, err := os.Stat("file1") - if err != nil { - t.Fatal(err) - } - - if fi.Mode() != 0444 { - t.Errorf("Fchmodat: failed to change mode: expected %v, got %v", 0444, fi.Mode()) - } - - err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, unix.AT_SYMLINK_NOFOLLOW) - if err != unix.EOPNOTSUPP { - t.Fatalf("Fchmodat: unexpected error: %v, expected EOPNOTSUPP", err) - } -} - func TestIoctlGetInt(t *testing.T) { f, err := os.Open("/dev/random") if err != nil { @@ -173,12 +144,46 @@ func TestUtimesNanoAt(t *testing.T) { } } -func TestGetrlimit(t *testing.T) { +func TestRlimitAs(t *testing.T) { + // disable GC during to avoid flaky test + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + var rlim unix.Rlimit err := unix.Getrlimit(unix.RLIMIT_AS, &rlim) if err != nil { t.Fatalf("Getrlimit: %v", err) } + var 
zero unix.Rlimit + if zero == rlim { + t.Fatalf("Getrlimit: got zero value %#v", rlim) + } + set := rlim + set.Cur = uint64(unix.Getpagesize()) + err = unix.Setrlimit(unix.RLIMIT_AS, &set) + if err != nil { + t.Fatalf("Setrlimit: set failed: %#v %v", set, err) + } + + // RLIMIT_AS was set to the page size, so mmap()'ing twice the page size + // should fail. See 'man 2 getrlimit'. + _, err = unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) + if err == nil { + t.Fatal("Mmap: unexpectedly suceeded after setting RLIMIT_AS") + } + + err = unix.Setrlimit(unix.RLIMIT_AS, &rlim) + if err != nil { + t.Fatalf("Setrlimit: restore failed: %#v %v", rlim, err) + } + + b, err := unix.Mmap(-1, 0, 2*unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE) + if err != nil { + t.Fatalf("Mmap: %v", err) + } + err = unix.Munmap(b) + if err != nil { + t.Fatalf("Munmap: %v", err) + } } func TestSelect(t *testing.T) { @@ -221,47 +226,6 @@ func TestPselect(t *testing.T) { } } -func TestFstatat(t *testing.T) { - defer chtmpdir(t)() - - touch(t, "file1") - - var st1 unix.Stat_t - err := unix.Stat("file1", &st1) - if err != nil { - t.Fatalf("Stat: %v", err) - } - - var st2 unix.Stat_t - err = unix.Fstatat(unix.AT_FDCWD, "file1", &st2, 0) - if err != nil { - t.Fatalf("Fstatat: %v", err) - } - - if st1 != st2 { - t.Errorf("Fstatat: returned stat does not match Stat") - } - - err = os.Symlink("file1", "symlink1") - if err != nil { - t.Fatal(err) - } - - err = unix.Lstat("symlink1", &st1) - if err != nil { - t.Fatalf("Lstat: %v", err) - } - - err = unix.Fstatat(unix.AT_FDCWD, "symlink1", &st2, unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - t.Fatalf("Fstatat: %v", err) - } - - if st1 != st2 { - t.Errorf("Fstatat: returned stat does not match Lstat") - } -} - func TestSchedSetaffinity(t *testing.T) { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -407,37 +371,3 @@ func TestStatx(t *testing.T) { t.Errorf("Statx: returned stat mtime does not 
match Lstat") } } - -// utilities taken from os/os_test.go - -func touch(t *testing.T, name string) { - f, err := os.Create(name) - if err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } -} - -// chtmpdir changes the working directory to a new temporary directory and -// provides a cleanup function. Used when PWD is read-only. -func chtmpdir(t *testing.T) func() { - oldwd, err := os.Getwd() - if err != nil { - t.Fatalf("chtmpdir: %v", err) - } - d, err := ioutil.TempDir("", "test") - if err != nil { - t.Fatalf("chtmpdir: %v", err) - } - if err := os.Chdir(d); err != nil { - t.Fatalf("chtmpdir: %v", err) - } - return func() { - if err := os.Chdir(oldwd); err != nil { - t.Fatalf("chtmpdir: %v", err) - } - os.RemoveAll(d) - } -} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 71b707838..e1a3baa23 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -233,13 +233,16 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) @@ -320,7 +323,6 @@ func Uname(uname *Utsname) error { // __msync13 // __ntp_gettime30 // __posix_chown -// __posix_fadvise50 // __posix_fchown // 
__posix_lchown // __posix_rename diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 37556e775..387e1cfcf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -204,10 +204,12 @@ func Uname(uname *Utsname) error { //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index eca8d1d09..f4d2a3451 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -595,9 +595,10 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Fchown(fd int, uid int, gid int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Flock(fd int, how int) (err error) +//sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) //sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) //sysnb Getgid() (gid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 80b05a406..8c66ae518 100644 --- 
a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -206,6 +206,20 @@ func GetsockoptInt(fd, level, opt int) (value int, err error) { return int(n), err } +func GetsockoptLinger(fd, level, opt int) (*Linger, error) { + var linger Linger + vallen := _Socklen(SizeofLinger) + err := getsockopt(fd, level, opt, unsafe.Pointer(&linger), &vallen) + return &linger, err +} + +func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) { + var tv Timeval + vallen := _Socklen(unsafe.Sizeof(tv)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&tv), &vallen) + return &tv, err +} + func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -305,3 +319,12 @@ func SetNonblock(fd int, nonblocking bool) (err error) { _, err = fcntl(fd, F_SETFL, flag) return err } + +// Exec calls execve(2), which replaces the calling executable in the process +// tree. argv0 should be the full path to an executable ("/bin/ls") and the +// executable name should also be the first argument in argv (["ls", "-l"]). +// envv are the environment variables that should be passed to the new +// process (["USER=go", "PWD=/tmp"]). 
+func Exec(argv0 string, argv []string, envv []string) error { + return syscall.Exec(argv0, argv, envv) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_test.go b/vendor/golang.org/x/sys/unix/syscall_unix_test.go index 496e47135..bbdb6fa33 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_test.go @@ -426,6 +426,101 @@ func TestGetwd(t *testing.T) { } } +func TestFstatat(t *testing.T) { + defer chtmpdir(t)() + + touch(t, "file1") + + var st1 unix.Stat_t + err := unix.Stat("file1", &st1) + if err != nil { + t.Fatalf("Stat: %v", err) + } + + var st2 unix.Stat_t + err = unix.Fstatat(unix.AT_FDCWD, "file1", &st2, 0) + if err != nil { + t.Fatalf("Fstatat: %v", err) + } + + if st1 != st2 { + t.Errorf("Fstatat: returned stat does not match Stat") + } + + err = os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } + + err = unix.Lstat("symlink1", &st1) + if err != nil { + t.Fatalf("Lstat: %v", err) + } + + err = unix.Fstatat(unix.AT_FDCWD, "symlink1", &st2, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + t.Fatalf("Fstatat: %v", err) + } + + if st1 != st2 { + t.Errorf("Fstatat: returned stat does not match Lstat") + } +} + +func TestFchmodat(t *testing.T) { + defer chtmpdir(t)() + + touch(t, "file1") + err := os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } + + mode := os.FileMode(0444) + err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), 0) + if err != nil { + t.Fatalf("Fchmodat: unexpected error: %v", err) + } + + fi, err := os.Stat("file1") + if err != nil { + t.Fatal(err) + } + + if fi.Mode() != mode { + t.Errorf("Fchmodat: failed to change file mode: expected %v, got %v", mode, fi.Mode()) + } + + mode = os.FileMode(0644) + didChmodSymlink := true + err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", uint32(mode), unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + if (runtime.GOOS == "linux" || runtime.GOOS == "solaris") && err == unix.EOPNOTSUPP { + // Linux and 
Illumos don't support flags != 0 + didChmodSymlink = false + } else { + t.Fatalf("Fchmodat: unexpected error: %v", err) + } + } + + if !didChmodSymlink { + // Didn't change mode of the symlink. On Linux, the permissions + // of a symbolic link are always 0777 according to symlink(7) + mode = os.FileMode(0777) + } + + var st unix.Stat_t + err = unix.Lstat("symlink1", &st) + if err != nil { + t.Fatal(err) + } + + got := os.FileMode(st.Mode & 0777) + if got != mode { + t.Errorf("Fchmodat: failed to change symlink mode: expected %v, got %v", mode, got) + } +} + // mktmpfifo creates a temporary FIFO and provides a cleanup function. func mktmpfifo(t *testing.T) (*os.File, func()) { err := unix.Mkfifo("fifo", 0666) @@ -444,3 +539,37 @@ func mktmpfifo(t *testing.T) (*os.File, func()) { os.Remove("fifo") } } + +// utilities taken from os/os_test.go + +func touch(t *testing.T, name string) { + f, err := os.Create(name) + if err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } +} + +// chtmpdir changes the working directory to a new temporary directory and +// provides a cleanup function. Used when PWD is read-only. 
+func chtmpdir(t *testing.T) func() { + oldwd, err := os.Getwd() + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + d, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatalf("chtmpdir: %v", err) + } + if err := os.Chdir(d); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + return func() { + if err := os.Chdir(oldwd); err != nil { + t.Fatalf("chtmpdir: %v", err) + } + os.RemoveAll(d) + } +} diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index 10aa9b3a4..1494aafcb 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -118,6 +118,17 @@ const ( PathMax = C.PATH_MAX ) +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + // Sockets type RawSockaddrInet4 C.struct_sockaddr_in diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index d96015505..474441b80 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -980,7 +980,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 763ae4fbb..4c9f72756 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, 
flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index d6808e072..256237773 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 6ae95e6b9..4ae787e49 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index ca6a7ea8b..14ed6886c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -693,6 +693,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index a0241de19..91f36e9ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -618,6 +618,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + 
return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -659,6 +674,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index fd9ca5a4a..a86434a7b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), 
uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index a9f18b22d..040e2f760 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 9823e18a1..cddc5e86b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 8f276d65f..8c9e26a0a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1541,6 +1541,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 61169b331..8dc2b58f5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1534,6 +1534,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 4cb59b4a5..e8beef850 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1551,6 +1551,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 0b547ae30..899e4403a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1551,6 +1551,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index cd94d3a83..7a477cbde 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1534,6 +1534,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset 
int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index cdad555a5..9dc4c7d6d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1551,6 +1551,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 38f4e44b6..f0d1ee125 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1551,6 +1551,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 2dd98434e..c01b3b6ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1222,6 +1222,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 62eadff1c..fb4b96278 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -571,6 +571,16 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 307f4e99e..beac82ef8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -571,6 +571,16 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), 0, uintptr(length), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -601,6 
+611,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 61109313c..7bd5f60b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -571,6 +571,16 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 003f820e6..5c09c0758 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != 
nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index ba0e8f329..54ccc935d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, 
uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 2ce02c7c4..59258b0a4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index f5d01b3a8..5e88619c4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -50,6 +50,7 @@ import ( //go:cgo_import_dynamic libc_flock flock "libc.so" //go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" //go:cgo_import_dynamic libc_fstat fstat "libc.so" +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" //go:cgo_import_dynamic libc_fstatvfs fstatvfs "libc.so" //go:cgo_import_dynamic libc_getdents getdents "libc.so" //go:cgo_import_dynamic libc_getgid getgid "libc.so" @@ -176,6 +177,7 @@ import ( //go:linkname procFlock libc_flock //go:linkname procFpathconf libc_fpathconf //go:linkname procFstat libc_fstat +//go:linkname procFstatat libc_fstatat //go:linkname procFstatvfs libc_fstatvfs //go:linkname procGetdents libc_getdents //go:linkname procGetgid libc_getgid @@ -303,6 +305,7 @@ var ( procFlock, procFpathconf, procFstat, + procFstatat, procFstatvfs, procGetdents, procGetgid, @@ -772,6 +775,19 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, 
e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index bc4bc89f8..327af5fba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -136,13 +136,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -295,14 +295,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -338,51 +338,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno 
int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -430,11 +430,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d8abcab12..116e6e075 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -26,9 +26,9 @@ type Timespec struct { } type Timeval struct { - Sec int64 - Usec int32 - Pad_cgo_0 [4]byte + Sec int64 + Usec int32 + _ [4]byte } type Timeval32 struct { @@ -70,7 +70,7 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - Pad_cgo_0 [4]byte + _ [4]byte Atimespec Timespec Mtimespec Timespec Ctimespec Timespec @@ -120,9 +120,9 @@ type Fstore_t struct { } type Radvisory_t struct { - Offset int64 - Count int32 - Pad_cgo_0 [4]byte + Offset int64 + Count int32 + _ [4]byte } type Fbootstraptransfer_t struct { @@ -132,9 +132,9 @@ type Fbootstraptransfer_t struct { } type Log2phys_t struct { - Flags uint32 - Pad_cgo_0 [8]byte - Pad_cgo_1 [8]byte + Flags uint32 + _ [8]byte + _ [8]byte } type Fsid struct { @@ -142,13 +142,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -221,10 +221,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ 
[4]byte Control *byte Controllen uint32 Flags int32 @@ -303,14 +303,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -346,51 +346,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -426,9 +426,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -439,22 +439,22 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval32 - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen 
uint16 + _ [2]byte } type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]uint8 - Pad_cgo_0 [4]byte - Ispeed uint64 - Ospeed uint64 + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + _ [4]byte + Ispeed uint64 + Ospeed uint64 } type Winsize struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 9749c9f7d..2750ad760 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -137,13 +137,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -296,14 +296,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -339,51 +339,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + 
Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -431,11 +431,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 810b0bd4f..8cead0996 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -26,9 +26,9 @@ type Timespec struct { } type Timeval struct { - Sec int64 - Usec int32 - Pad_cgo_0 [4]byte + Sec int64 + Usec int32 + _ [4]byte } type Timeval32 struct { @@ -70,7 +70,7 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - Pad_cgo_0 [4]byte + _ [4]byte Atimespec Timespec Mtimespec Timespec Ctimespec Timespec @@ -120,9 +120,9 @@ type Fstore_t struct { } type Radvisory_t struct { - Offset int64 - Count int32 - Pad_cgo_0 [4]byte + Offset int64 + Count int32 + _ [4]byte } type Fbootstraptransfer_t struct { @@ -132,9 +132,9 @@ type Fbootstraptransfer_t struct { } type Log2phys_t struct { - Flags uint32 - Pad_cgo_0 [8]byte - Pad_cgo_1 [8]byte + Flags uint32 + _ [8]byte + _ [8]byte } type Fsid struct { @@ -142,13 +142,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 
struct { @@ -221,10 +221,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -303,14 +303,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -346,51 +346,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -426,9 +426,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -439,22 +439,22 @@ type BpfInsn 
struct { } type BpfHdr struct { - Tstamp Timeval32 - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]uint8 - Pad_cgo_0 [4]byte - Ispeed uint64 - Ospeed uint64 + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + _ [4]byte + Ispeed uint64 + Ospeed uint64 } type Winsize struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index e3b8ebb01..315a553bd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -108,7 +108,7 @@ type Statfs_t struct { Owner uint32 Type int32 Flags int32 - Pad_cgo_0 [4]byte + _ [4]byte Syncwrites int64 Asyncwrites int64 Fstypename [16]int8 @@ -118,7 +118,7 @@ type Statfs_t struct { Spares1 int16 Mntfromname [80]int8 Spares2 int16 - Pad_cgo_1 [4]byte + _ [4]byte Spare [2]int64 } @@ -219,10 +219,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -294,14 +294,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -311,7 +311,7 @@ type IfData struct { Hdrlen uint8 Recvquota uint8 Xmitquota uint8 - Pad_cgo_0 [2]byte + _ [2]byte Mtu uint64 Metric uint64 Link_state uint64 @@ -333,24 +333,24 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version 
uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfAnnounceMsghdr struct { @@ -363,19 +363,19 @@ type IfAnnounceMsghdr struct { } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint64 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint64 + Rmx RtMetrics } type RtMetrics struct { @@ -391,7 +391,7 @@ type RtMetrics struct { Hopcount uint64 Mssopt uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Msl uint64 Iwmaxsegs uint64 Iwcapsegs uint64 @@ -416,9 +416,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -429,11 +429,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [6]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 9dbbb1ce5..8e7384b89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -376,97 +376,123 @@ const ( ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - 
IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2a - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 
0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + 
RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 ) type NlMsghdr struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index da70faa82..4b86fb2b3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -103,6 +103,15 @@ const ( PathMax = 0x400 ) +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 0963ab8c4..9048a509d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -107,6 +107,15 @@ const ( PathMax = 0x400 ) +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 211f64193..00525e7b0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -108,6 +108,15 @@ const ( PathMax = 0x400 ) +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go 
b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index d44545248..2248598d0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -93,40 +93,40 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - Size int64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Blksize int32 - Pad_cgo_0 [4]byte - Blocks int64 - Fstype [16]int8 + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int32 + _ [4]byte + Blocks int64 + Fstype [16]int8 } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Sysid int32 - Pid int32 - Pad [4]int64 + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Sysid int32 + Pid int32 + Pad [4]int64 } type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Name [1]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Name [1]int8 + _ [5]byte } type _Fsblkcnt_t uint64 @@ -213,13 +213,13 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Accrights *int8 Accrightslen int32 - Pad_cgo_2 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -271,11 +271,11 @@ type Utsname struct { } type Ustat_t struct { - Tfree int64 - Tinode uint64 - Fname [6]int8 - Fpack [6]int8 - Pad_cgo_0 [4]byte + Tfree int64 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte } const ( @@ -295,21 +295,21 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { Type uint8 Addrlen uint8 Hdrlen uint8 - 
Pad_cgo_0 [1]byte + _ [1]byte Mtu uint32 Metric uint32 Baudrate uint32 @@ -328,30 +328,30 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -388,9 +388,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -406,30 +406,30 @@ type BpfTimeval struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [19]uint8 - Pad_cgo_0 [1]byte + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + _ [1]byte } type Termio struct { - Iflag uint16 - Oflag uint16 - Cflag uint16 - Lflag uint16 - Line int8 - Cc [8]uint8 - Pad_cgo_0 [1]byte + Iflag uint16 + Oflag uint16 + Cflag uint16 + Lflag uint16 + Line int8 + Cc [8]uint8 + _ [1]byte } type Winsize struct { diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index b07bc2305..af828a91b 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -11,11 +11,14 @@ // system, set $GOOS and $GOARCH to the desired system. 
For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. +// // The primary use of this package is inside other packages that provide a more // portable interface to the system, such as "os", "time" and "net". Use // those packages rather than this one if you can. +// // For details of the functions and data types in this package consult // the manuals for the appropriate operating system. +// // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. diff --git a/vendor/google.golang.org/api/admin/directory/v1/admin-api.json b/vendor/google.golang.org/api/admin/directory/v1/admin-api.json index 865cac864..85a4c746b 100644 --- a/vendor/google.golang.org/api/admin/directory/v1/admin-api.json +++ b/vendor/google.golang.org/api/admin/directory/v1/admin-api.json @@ -93,7 +93,7 @@ "description": "The Admin SDK Directory API lets you view and manage enterprise resources such as users and groups, administrative notifications, security features, and more.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/admin-sdk/directory/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/FEIM1JSTcD4HhoBpMoEeP9vkMCQ\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/ny7NvsE3PzK3etgzPNk9J8KeI7k\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -1166,7 +1166,7 @@ ] }, "hasMember": { - "description": "Checks Membership of an user within a Group", + "description": "Checks whether the given user is a member of the group. 
Membership can be direct or nested.", "httpMethod": "GET", "id": "directory.members.hasMember", "parameterOrder": [ @@ -1175,13 +1175,13 @@ ], "parameters": { "groupKey": { - "description": "Email or immutable Id of the group", + "description": "Identifies the group in the API request. The value can be the group's email address, group alias, or the unique group ID.", "location": "path", "required": true, "type": "string" }, "memberKey": { - "description": "Email or immutable Id of the member", + "description": "Identifies the user member in the API request. The value can be the user's primary email address, alias, or unique ID.", "location": "path", "required": true, "type": "string" @@ -2056,6 +2056,19 @@ "location": "path", "required": true, "type": "string" + }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, + "pageToken": { + "description": "Token to specify the next page in the list.", + "location": "query", + "type": "string" } }, "path": "customer/{customer}/resources/buildings", @@ -2254,7 +2267,7 @@ "type": "string" }, "query": { - "description": "String query used to filter results. Should be of the form \"field operator value\" where field can be any of supported fields and operators can be any of supported operations. Operators include '=' for exact match and ':' for prefix match where applicable. For prefix match, the value should always be followed by a *.", + "description": "String query used to filter results. Should be of the form \"field operator value\" where field can be any of supported fields and operators can be any of supported operations. Operators include '=' for exact match and ':' for prefix match or HAS match where applicable. For prefix match, the value should always be followed by a *. Supported fields include generatedResourceName, name, buildingId, featureInstances.feature.name. 
For example buildingId=US-NYC-9TH AND featureInstances.feature.name:Phone.", "location": "query", "type": "string" } @@ -2436,6 +2449,14 @@ "required": true, "type": "string" }, + "maxResults": { + "description": "Maximum number of results to return.", + "format": "int32", + "location": "query", + "maximum": "500", + "minimum": "1", + "type": "integer" + }, "pageToken": { "description": "Token to specify the next page in the list.", "location": "query", @@ -3914,7 +3935,7 @@ } } }, - "revision": "20171212", + "revision": "20180316", "rootUrl": "https://www.googleapis.com/", "schemas": { "Alias": { @@ -4353,7 +4374,7 @@ "type": "string" }, "downloadUrl": { - "description": "File downlod URL", + "description": "File download URL", "type": "string" }, "name": { @@ -4463,23 +4484,30 @@ "type": "string" }, "tpmVersionInfo": { + "description": "Trusted Platform Module (TPM) (Read-only)", "properties": { "family": { + "description": "TPM family.", "type": "string" }, "firmwareVersion": { + "description": "TPM firmware version.", "type": "string" }, "manufacturer": { + "description": "TPM manufacturer code.", "type": "string" }, "specLevel": { + "description": "TPM specification level.", "type": "string" }, "tpmModel": { + "description": "TPM model number.", "type": "string" }, "vendorSpecific": { + "description": "Vendor-specific information such as Vendor ID.", "type": "string" } }, @@ -4804,7 +4832,8 @@ "id": "FeatureInstance", "properties": { "feature": { - "$ref": "Feature" + "$ref": "Feature", + "description": "The feature that this is an instance of. A calendar resource may have multiple instances of a feature." } }, "type": "object" @@ -5007,7 +5036,7 @@ "id": "MembersHasMember", "properties": { "isMember": { - "description": "Identifies whether given user is a member or not.", + "description": "Identifies whether the given user is a member of the group. 
Membership can be direct or nested.", "readOnly": true, "type": "boolean" } diff --git a/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go b/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go index 28fa2d189..90fcc2848 100644 --- a/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go +++ b/vendor/google.golang.org/api/admin/directory/v1/admin-gen.go @@ -1132,6 +1132,7 @@ type ChromeOsDevice struct { // SupportEndDate: Final date the device will be supported (Read-only) SupportEndDate string `json:"supportEndDate,omitempty"` + // TpmVersionInfo: Trusted Platform Module (TPM) (Read-only) TpmVersionInfo *ChromeOsDeviceTpmVersionInfo `json:"tpmVersionInfo,omitempty"` // WillAutoRenew: Will Chromebook auto renew after support end date @@ -1200,7 +1201,7 @@ type ChromeOsDeviceDeviceFiles struct { // CreateTime: Date and time the file was created CreateTime string `json:"createTime,omitempty"` - // DownloadUrl: File downlod URL + // DownloadUrl: File download URL DownloadUrl string `json:"downloadUrl,omitempty"` // Name: File name @@ -1263,17 +1264,25 @@ func (s *ChromeOsDeviceRecentUsers) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ChromeOsDeviceTpmVersionInfo: Trusted Platform Module (TPM) +// (Read-only) type ChromeOsDeviceTpmVersionInfo struct { + // Family: TPM family. Family string `json:"family,omitempty"` + // FirmwareVersion: TPM firmware version. FirmwareVersion string `json:"firmwareVersion,omitempty"` + // Manufacturer: TPM manufacturer code. Manufacturer string `json:"manufacturer,omitempty"` + // SpecLevel: TPM specification level. SpecLevel string `json:"specLevel,omitempty"` + // TpmModel: TPM model number. TpmModel string `json:"tpmModel,omitempty"` + // VendorSpecific: Vendor-specific information such as Vendor ID. VendorSpecific string `json:"vendorSpecific,omitempty"` // ForceSendFields is a list of field names (e.g. 
"Family") to @@ -1740,6 +1749,8 @@ func (s *Feature) MarshalJSON() ([]byte, error) { // FeatureInstance: JSON template for a "feature instance". type FeatureInstance struct { + // Feature: The feature that this is an instance of. A calendar resource + // may have multiple instances of a feature. Feature *Feature `json:"feature,omitempty"` // ForceSendFields is a list of field names (e.g. "Feature") to @@ -2035,7 +2046,8 @@ func (s *Members) MarshalJSON() ([]byte, error) { // MembersHasMember: JSON template for Has Member response in Directory // API. type MembersHasMember struct { - // IsMember: Identifies whether given user is a member or not. + // IsMember: Identifies whether the given user is a member of the group. + // Membership can be direct or nested. IsMember bool `json:"isMember,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -8766,7 +8778,8 @@ type MembersHasMemberCall struct { header_ http.Header } -// HasMember: Checks Membership of an user within a Group +// HasMember: Checks whether the given user is a member of the group. +// Membership can be direct or nested. func (r *MembersService) HasMember(groupKey string, memberKey string) *MembersHasMemberCall { c := &MembersHasMemberCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.groupKey = groupKey @@ -8869,7 +8882,7 @@ func (c *MembersHasMemberCall) Do(opts ...googleapi.CallOption) (*MembersHasMemb } return ret, nil // { - // "description": "Checks Membership of an user within a Group", + // "description": "Checks whether the given user is a member of the group. Membership can be direct or nested.", // "httpMethod": "GET", // "id": "directory.members.hasMember", // "parameterOrder": [ @@ -8878,13 +8891,13 @@ func (c *MembersHasMemberCall) Do(opts ...googleapi.CallOption) (*MembersHasMemb // ], // "parameters": { // "groupKey": { - // "description": "Email or immutable Id of the group", + // "description": "Identifies the group in the API request. 
The value can be the group's email address, group alias, or the unique group ID.", // "location": "path", // "required": true, // "type": "string" // }, // "memberKey": { - // "description": "Email or immutable Id of the member", + // "description": "Identifies the user member in the API request. The value can be the user's primary email address, alias, or unique ID.", // "location": "path", // "required": true, // "type": "string" @@ -12578,6 +12591,20 @@ func (r *ResourcesBuildingsService) List(customer string) *ResourcesBuildingsLis return c } +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *ResourcesBuildingsListCall) MaxResults(maxResults int64) *ResourcesBuildingsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Token to specify +// the next page in the list. +func (c *ResourcesBuildingsListCall) PageToken(pageToken string) *ResourcesBuildingsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -12684,6 +12711,19 @@ func (c *ResourcesBuildingsListCall) Do(opts ...googleapi.CallOption) (*Building // "location": "path", // "required": true, // "type": "string" + // }, + // "maxResults": { + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Token to specify the next page in the list.", + // "location": "query", + // "type": "string" // } // }, // "path": "customer/{customer}/resources/buildings", @@ -12698,6 +12738,27 @@ func (c *ResourcesBuildingsListCall) Do(opts ...googleapi.CallOption) (*Building } +// Pages invokes f for each page of results. 
+// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ResourcesBuildingsListCall) Pages(ctx context.Context, f func(*Buildings) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "directory.resources.buildings.patch": type ResourcesBuildingsPatchCall struct { @@ -13418,8 +13479,11 @@ func (c *ResourcesCalendarsListCall) PageToken(pageToken string) *ResourcesCalen // filter results. Should be of the form "field operator value" where // field can be any of supported fields and operators can be any of // supported operations. Operators include '=' for exact match and ':' -// for prefix match where applicable. For prefix match, the value should -// always be followed by a *. +// for prefix match or HAS match where applicable. For prefix match, the +// value should always be followed by a *. Supported fields include +// generatedResourceName, name, buildingId, +// featureInstances.feature.name. For example buildingId=US-NYC-9TH AND +// featureInstances.feature.name:Phone. func (c *ResourcesCalendarsListCall) Query(query string) *ResourcesCalendarsListCall { c.urlParams_.Set("query", query) return c @@ -13551,7 +13615,7 @@ func (c *ResourcesCalendarsListCall) Do(opts ...googleapi.CallOption) (*Calendar // "type": "string" // }, // "query": { - // "description": "String query used to filter results. Should be of the form \"field operator value\" where field can be any of supported fields and operators can be any of supported operations. Operators include '=' for exact match and ':' for prefix match where applicable. 
For prefix match, the value should always be followed by a *.", + // "description": "String query used to filter results. Should be of the form \"field operator value\" where field can be any of supported fields and operators can be any of supported operations. Operators include '=' for exact match and ':' for prefix match or HAS match where applicable. For prefix match, the value should always be followed by a *. Supported fields include generatedResourceName, name, buildingId, featureInstances.feature.name. For example buildingId=US-NYC-9TH AND featureInstances.feature.name:Phone.", // "location": "query", // "type": "string" // } @@ -14287,6 +14351,13 @@ func (r *ResourcesFeaturesService) List(customer string) *ResourcesFeaturesListC return c } +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to return. +func (c *ResourcesFeaturesListCall) MaxResults(maxResults int64) *ResourcesFeaturesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + // PageToken sets the optional parameter "pageToken": Token to specify // the next page in the list. 
func (c *ResourcesFeaturesListCall) PageToken(pageToken string) *ResourcesFeaturesListCall { @@ -14401,6 +14472,14 @@ func (c *ResourcesFeaturesListCall) Do(opts ...googleapi.CallOption) (*Features, // "required": true, // "type": "string" // }, + // "maxResults": { + // "description": "Maximum number of results to return.", + // "format": "int32", + // "location": "query", + // "maximum": "500", + // "minimum": "1", + // "type": "integer" + // }, // "pageToken": { // "description": "Token to specify the next page in the list.", // "location": "query", diff --git a/vendor/google.golang.org/api/analytics/v3/analytics-api.json b/vendor/google.golang.org/api/analytics/v3/analytics-api.json index 021d62b12..03d575dac 100644 --- a/vendor/google.golang.org/api/analytics/v3/analytics-api.json +++ b/vendor/google.golang.org/api/analytics/v3/analytics-api.json @@ -29,7 +29,7 @@ "description": "Views and manages your Google Analytics data.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/analytics/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/o3DGRejYlQONYxz9qTIt_cgX6AA\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/pZyXISvfodVzorQpGHmIl5L61w8\"", "icons": { "x16": "https://www.google.com/images/icons/product/analytics-16.png", "x32": "https://www.google.com/images/icons/product/analytics-32.png" @@ -3745,7 +3745,7 @@ "description": "Provision account.", "httpMethod": "POST", "id": "analytics.provisioning.createAccountTree", - "path": "provisioning/createAccount", + "path": "provisioning/createAccountTree", "request": { "$ref": "AccountTreeRequest" }, @@ -3759,7 +3759,7 @@ } } }, - "revision": "20171211", + "revision": "20180309", "rootUrl": "https://www.googleapis.com/", "schemas": { "Account": { diff --git a/vendor/google.golang.org/api/analytics/v3/analytics-gen.go b/vendor/google.golang.org/api/analytics/v3/analytics-gen.go index 4422cfe70..23aa740f3 100644 --- a/vendor/google.golang.org/api/analytics/v3/analytics-gen.go +++ 
b/vendor/google.golang.org/api/analytics/v3/analytics-gen.go @@ -19274,7 +19274,7 @@ func (c *ProvisioningCreateAccountTreeCall) doRequest(alt string) (*http.Respons } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) - urls := googleapi.ResolveRelative(c.s.BasePath, "provisioning/createAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "provisioning/createAccountTree") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) req.Header = reqHeaders @@ -19322,7 +19322,7 @@ func (c *ProvisioningCreateAccountTreeCall) Do(opts ...googleapi.CallOption) (*A // "description": "Provision account.", // "httpMethod": "POST", // "id": "analytics.provisioning.createAccountTree", - // "path": "provisioning/createAccount", + // "path": "provisioning/createAccountTree", // "request": { // "$ref": "AccountTreeRequest" // }, diff --git a/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-api.json b/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-api.json index ba45e71fa..7c0bda59f 100644 --- a/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-api.json +++ b/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-api.json @@ -486,7 +486,7 @@ ], "parameters": { "partnerId": { - "description": "The ID of the partner.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -504,7 +504,7 @@ "devices": { "methods": { "claim": { - "description": "Claim the device identified by device identifier.", + "description": "Claims a device for a customer and adds it to zero-touch enrollment. 
If the\ndevice is already claimed by another customer, the call returns an error.", "flatPath": "v1/partners/{partnersId}/devices:claim", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.claim", @@ -513,7 +513,7 @@ ], "parameters": { "partnerId": { - "description": "ID of the partner.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -530,7 +530,7 @@ } }, "claimAsync": { - "description": "Claim devices asynchronously.", + "description": "Claims a batch of devices for a customer asynchronously. Adds the devices\nto zero-touch enrollment. To learn more, read [Long‑running batch\noperations](/zero-touch/guides/how-it-works#operations).", "flatPath": "v1/partners/{partnersId}/devices:claimAsync", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.claimAsync", @@ -539,7 +539,7 @@ ], "parameters": { "partnerId": { - "description": "Partner ID.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -556,7 +556,7 @@ } }, "findByIdentifier": { - "description": "Find devices by device identifier.", + "description": "Finds devices by hardware identifiers, such as IMEI.", "flatPath": "v1/partners/{partnersId}/devices:findByIdentifier", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.findByIdentifier", @@ -565,7 +565,7 @@ ], "parameters": { "partnerId": { - "description": "ID of the partner.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -582,7 +582,7 @@ } }, "findByOwner": { - "description": "Find devices by ownership.", + "description": "Finds devices claimed for customers. 
The results only contain devices\nregistered to the reseller that's identified by the `partnerId` argument.\nThe customer's devices purchased from other resellers don't appear in the\nresults.", "flatPath": "v1/partners/{partnersId}/devices:findByOwner", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.findByOwner", @@ -591,7 +591,7 @@ ], "parameters": { "partnerId": { - "description": "ID of the partner.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -608,7 +608,7 @@ } }, "get": { - "description": "Get a device.", + "description": "Gets a device.", "flatPath": "v1/partners/{partnersId}/devices/{devicesId}", "httpMethod": "GET", "id": "androiddeviceprovisioning.partners.devices.get", @@ -617,7 +617,7 @@ ], "parameters": { "name": { - "description": "Resource name in `partners/[PARTNER_ID]/devices/[DEVICE_ID]`.", + "description": "Required. The device API resource name in the format\n`partners/[PARTNER_ID]/devices/[DEVICE_ID]`.", "location": "path", "pattern": "^partners/[^/]+/devices/[^/]+$", "required": true, @@ -630,7 +630,7 @@ } }, "metadata": { - "description": "Update the metadata.", + "description": "Updates reseller metadata associated with the device.", "flatPath": "v1/partners/{partnersId}/devices/{devicesId}/metadata", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.metadata", @@ -640,7 +640,7 @@ ], "parameters": { "deviceId": { - "description": "ID of the partner.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -648,7 +648,7 @@ "type": "string" }, "metadataOwnerId": { - "description": "The owner of the newly set metadata. Set this to the partner ID.", + "description": "Required. The owner of the newly set metadata. 
Set this to the partner ID.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -665,7 +665,7 @@ } }, "unclaim": { - "description": "Unclaim the device identified by the `device_id` or the `deviceIdentifier`.", + "description": "Unclaims a device from a customer and removes it from zero-touch\nenrollment.", "flatPath": "v1/partners/{partnersId}/devices:unclaim", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.unclaim", @@ -674,7 +674,7 @@ ], "parameters": { "partnerId": { - "description": "ID of the partner.", + "description": "Required. The ID of the reseller partner.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -691,7 +691,7 @@ } }, "unclaimAsync": { - "description": "Unclaim devices asynchronously.", + "description": "Unclaims a batch of devices for a customer asynchronously. Removes the\ndevices from zero-touch enrollment. To learn more, read [Long‑running batch\noperations](/zero-touch/guides/how-it-works#operations).", "flatPath": "v1/partners/{partnersId}/devices:unclaimAsync", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.unclaimAsync", @@ -700,7 +700,7 @@ ], "parameters": { "partnerId": { - "description": "Partner ID.", + "description": "Required. The reseller partner ID.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -717,7 +717,7 @@ } }, "updateMetadataAsync": { - "description": "Set metadata in batch asynchronously.", + "description": "Updates the reseller metadata attached to a batch of devices. This method\nupdates devices asynchronously and returns an `Operation` that can be used\nto track progress. 
Read [Long‑running batch\noperations](/zero-touch/guides/how-it-works#operations).", "flatPath": "v1/partners/{partnersId}/devices:updateMetadataAsync", "httpMethod": "POST", "id": "androiddeviceprovisioning.partners.devices.updateMetadataAsync", @@ -726,7 +726,7 @@ ], "parameters": { "partnerId": { - "description": "Partner ID.", + "description": "Required. The reseller partner ID.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -747,7 +747,7 @@ } } }, - "revision": "20180203", + "revision": "20180317", "rootUrl": "https://androiddeviceprovisioning.googleapis.com/", "schemas": { "ClaimDeviceRequest": { @@ -755,16 +755,16 @@ "id": "ClaimDeviceRequest", "properties": { "customerId": { - "description": "The customer to claim for.", + "description": "Required. The ID of the customer for whom the device is being claimed.", "format": "int64", "type": "string" }, "deviceIdentifier": { "$ref": "DeviceIdentifier", - "description": "The device identifier of the device to claim." + "description": "Required. The device identifier of the device to claim." }, "sectionType": { - "description": "The section to claim.", + "description": "Required. The section type of the device's provisioning record.", "enum": [ "SECTION_TYPE_UNSPECIFIED", "SECTION_TYPE_ZERO_TOUCH" @@ -795,11 +795,11 @@ "type": "object" }, "ClaimDevicesRequest": { - "description": "Request to claim devices asynchronously in batch.", + "description": "Request to claim devices asynchronously in batch. Claiming a device adds the\ndevice to zero-touch enrollment and shows the device in the customer's view\nof the portal.", "id": "ClaimDevicesRequest", "properties": { "claims": { - "description": "List of claims.", + "description": "Required. 
A list of device claims.", "items": { "$ref": "PartnerClaim" }, @@ -1115,16 +1115,16 @@ "type": "object" }, "DevicesLongRunningOperationMetadata": { - "description": "Long running operation metadata.", + "description": "Tracks the status of a long-running operation to asynchronously update a\nbatch of reseller metadata attached to devices. To learn more, read\n[Long‑running batch operations](/zero-touch/guides/how-it-works#operations).", "id": "DevicesLongRunningOperationMetadata", "properties": { "devicesCount": { - "description": "Number of devices parsed in your requests.", + "description": "The number of metadata updates in the operation. This might be different\nfrom the number of updates in the request if the API can't parse some of\nthe updates.", "format": "int32", "type": "integer" }, "processingStatus": { - "description": "The overall processing status.", + "description": "The processing status of the operation.", "enum": [ "BATCH_PROCESS_STATUS_UNSPECIFIED", "BATCH_PROCESS_PENDING", @@ -1140,7 +1140,7 @@ "type": "string" }, "progress": { - "description": "Processing progress from 0 to 100.", + "description": "The processing progress of the operation. Measured as a number from 0 to\n100. A value of 10O doesnt always mean the operation completed—check for\nthe inclusion of a `done` field.", "format": "int32", "type": "integer" } @@ -1148,18 +1148,18 @@ "type": "object" }, "DevicesLongRunningOperationResponse": { - "description": "Long running operation response.", + "description": "Tracks the status of a long-running operation to claim, unclaim, or attach\nmetadata to devices. To learn more, read\n[Long‑running batch operations](/zero-touch/guides/how-it-works#operations).", "id": "DevicesLongRunningOperationResponse", "properties": { "perDeviceStatus": { - "description": "Processing status for each device.\nOne `PerDeviceStatus` per device. 
The order is the same as in your requests.", + "description": "The processing status for each device in the operation.\nOne `PerDeviceStatus` per device. The list order matches the items in the\noriginal request.", "items": { "$ref": "OperationPerDevice" }, "type": "array" }, "successCount": { - "description": "Number of succeesfully processed ones.", + "description": "A summary of how many items in the operation the server processed\nsuccessfully. Updated as the operation progresses.", "format": "int32", "type": "integer" } @@ -1197,15 +1197,15 @@ "properties": { "deviceIdentifier": { "$ref": "DeviceIdentifier", - "description": "The device identifier to search." + "description": "Required. The device identifier to search for." }, "limit": { - "description": "Number of devices to show.", + "description": "Required. The maximum number of devices to show in a page of results. Must\nbe between 1 and 100 inclusive.", "format": "int64", "type": "string" }, "pageToken": { - "description": "Page token.", + "description": "A token specifying which result page to return.", "type": "string" } }, @@ -1223,7 +1223,7 @@ "type": "array" }, "nextPageToken": { - "description": "Page token of the next page.", + "description": "A token used to access the next page of results. Omitted if no further\nresults are available.", "type": "string" } }, @@ -1234,7 +1234,7 @@ "id": "FindDevicesByOwnerRequest", "properties": { "customerId": { - "description": "List of customer IDs to search for.", + "description": "Required. The list of customer IDs to search for.", "items": { "format": "int64", "type": "string" @@ -1242,16 +1242,16 @@ "type": "array" }, "limit": { - "description": "The number of devices to show in the result.", + "description": "Required. The maximum number of devices to show in a page of results. 
Must\nbe between 1 and 100 inclusive.", "format": "int64", "type": "string" }, "pageToken": { - "description": "Page token.", + "description": "A token specifying which result page to return.", "type": "string" }, "sectionType": { - "description": "The section type.", + "description": "Required. The section type of the device's provisioning record.", "enum": [ "SECTION_TYPE_UNSPECIFIED", "SECTION_TYPE_ZERO_TOUCH" @@ -1270,14 +1270,14 @@ "id": "FindDevicesByOwnerResponse", "properties": { "devices": { - "description": "Devices found.", + "description": "The customer's devices.", "items": { "$ref": "Device" }, "type": "array" }, "nextPageToken": { - "description": "Page token of the next page.", + "description": "A token used to access the next page of results.\nOmitted if no further results are available.", "type": "string" } }, @@ -1288,7 +1288,7 @@ "id": "ListCustomersResponse", "properties": { "customers": { - "description": "List of customers related to this partner.", + "description": "List of customers related to this reseller partner.", "items": { "$ref": "Company" }, @@ -1333,24 +1333,24 @@ "type": "object" }, "OperationPerDevice": { - "description": "Operation the server received for every device.", + "description": "A task for each device in the operation. Corresponds to each device\nchange in the request.", "id": "OperationPerDevice", "properties": { "claim": { "$ref": "PartnerClaim", - "description": "Request to claim a device." + "description": "A copy of the original device-claim request received by the server." }, "result": { "$ref": "PerDeviceStatusInBatch", - "description": "Processing result for every device." + "description": "The processing result for each device." }, "unclaim": { "$ref": "PartnerUnclaim", - "description": "Request to unclaim a device." + "description": "A copy of the original device-unclaim request received by the server." 
}, "updateMetadata": { "$ref": "UpdateMetadataArguments", - "description": "Request to set metadata for a device." + "description": "A copy of the original metadata-update request received by the server." } }, "type": "object" @@ -1360,20 +1360,20 @@ "id": "PartnerClaim", "properties": { "customerId": { - "description": "Customer ID to claim for.", + "description": "Required. The ID of the customer for whom the device is being claimed.", "format": "int64", "type": "string" }, "deviceIdentifier": { "$ref": "DeviceIdentifier", - "description": "Device identifier of the device." + "description": "Required. Device identifier of the device." }, "deviceMetadata": { "$ref": "DeviceMetadata", - "description": "Metadata to set at claim." + "description": "Required. The metadata to attach to the device at claim." }, "sectionType": { - "description": "Section type to claim.", + "description": "Required. The section type of the device's provisioning record.", "enum": [ "SECTION_TYPE_UNSPECIFIED", "SECTION_TYPE_ZERO_TOUCH" @@ -1401,7 +1401,7 @@ "description": "Device identifier of the device." }, "sectionType": { - "description": "Section type to unclaim.", + "description": "Required. 
The section type of the device's provisioning record.", "enum": [ "SECTION_TYPE_UNSPECIFIED", "SECTION_TYPE_ZERO_TOUCH" @@ -1416,24 +1416,24 @@ "type": "object" }, "PerDeviceStatusInBatch": { - "description": "Stores the processing result for each device.", + "description": "Captures the processing status for each device in the operation.", "id": "PerDeviceStatusInBatch", "properties": { "deviceId": { - "description": "Device ID of the device if process succeeds.", + "description": "If processing succeeds, the device ID of the device.", "format": "int64", "type": "string" }, "errorIdentifier": { - "description": "Error identifier.", + "description": "If processing fails, the error type.", "type": "string" }, "errorMessage": { - "description": "Error message.", + "description": "If processing fails, a developer message explaining what went wrong.", "type": "string" }, "status": { - "description": "Process result.", + "description": "The result status of the device after processing.", "enum": [ "SINGLE_DEVICE_STATUS_UNSPECIFIED", "SINGLE_DEVICE_STATUS_UNKNOWN_ERROR", @@ -1500,7 +1500,7 @@ "description": "The device identifier you used when you claimed this device." }, "sectionType": { - "description": "The section type to unclaim for.", + "description": "Required. The section type of the device's provisioning record.", "enum": [ "SECTION_TYPE_UNSPECIFIED", "SECTION_TYPE_ZERO_TOUCH" @@ -1519,7 +1519,7 @@ "id": "UnclaimDevicesRequest", "properties": { "unclaims": { - "description": "List of devices to unclaim.", + "description": "Required. The list of devices to unclaim.", "items": { "$ref": "PartnerUnclaim" }, @@ -1533,7 +1533,7 @@ "id": "UpdateDeviceMetadataInBatchRequest", "properties": { "updates": { - "description": "List of metadata updates.", + "description": "Required. 
The list of metadata updates.", "items": { "$ref": "UpdateMetadataArguments" }, @@ -1548,7 +1548,7 @@ "properties": { "deviceMetadata": { "$ref": "DeviceMetadata", - "description": "The metdata to set." + "description": "Required. The metdata to attach to the device." } }, "type": "object" @@ -1568,7 +1568,7 @@ }, "deviceMetadata": { "$ref": "DeviceMetadata", - "description": "The metadata to update." + "description": "Required. The metadata to update." } }, "type": "object" diff --git a/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-gen.go b/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-gen.go index 20ba0fe40..973e06063 100644 --- a/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-gen.go +++ b/vendor/google.golang.org/api/androiddeviceprovisioning/v1/androiddeviceprovisioning-gen.go @@ -165,13 +165,16 @@ type PartnersDevicesService struct { // ClaimDeviceRequest: Request message to claim a device on behalf of a // customer. type ClaimDeviceRequest struct { - // CustomerId: The customer to claim for. + // CustomerId: Required. The ID of the customer for whom the device is + // being claimed. CustomerId int64 `json:"customerId,omitempty,string"` - // DeviceIdentifier: The device identifier of the device to claim. + // DeviceIdentifier: Required. The device identifier of the device to + // claim. DeviceIdentifier *DeviceIdentifier `json:"deviceIdentifier,omitempty"` - // SectionType: The section to claim. + // SectionType: Required. The section type of the device's provisioning + // record. // // Possible values: // "SECTION_TYPE_UNSPECIFIED" - Unspecified section type. @@ -240,9 +243,12 @@ func (s *ClaimDeviceResponse) MarshalJSON() ([]byte, error) { } // ClaimDevicesRequest: Request to claim devices asynchronously in -// batch. +// batch. 
Claiming a device adds the +// device to zero-touch enrollment and shows the device in the +// customer's view +// of the portal. type ClaimDevicesRequest struct { - // Claims: List of claims. + // Claims: Required. A list of device claims. Claims []*PartnerClaim `json:"claims,omitempty"` // ForceSendFields is a list of field names (e.g. "Claims") to @@ -939,12 +945,21 @@ func (s *DeviceReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DevicesLongRunningOperationMetadata: Long running operation metadata. +// DevicesLongRunningOperationMetadata: Tracks the status of a +// long-running operation to asynchronously update a +// batch of reseller metadata attached to devices. To learn more, +// read +// [Long‑running batch +// operations](/zero-touch/guides/how-it-works#operations). type DevicesLongRunningOperationMetadata struct { - // DevicesCount: Number of devices parsed in your requests. + // DevicesCount: The number of metadata updates in the operation. This + // might be different + // from the number of updates in the request if the API can't parse some + // of + // the updates. DevicesCount int64 `json:"devicesCount,omitempty"` - // ProcessingStatus: The overall processing status. + // ProcessingStatus: The processing status of the operation. // // Possible values: // "BATCH_PROCESS_STATUS_UNSPECIFIED" - Invalid code. Shouldn't be @@ -957,7 +972,11 @@ type DevicesLongRunningOperationMetadata struct { // check the `response` field for the result of every item. ProcessingStatus string `json:"processingStatus,omitempty"` - // Progress: Processing progress from 0 to 100. + // Progress: The processing progress of the operation. Measured as a + // number from 0 to + // 100. A value of 10O doesnt always mean the operation + // completed—check for + // the inclusion of a `done` field. Progress int64 `json:"progress,omitempty"` // ForceSendFields is a list of field names (e.g. 
"DevicesCount") to @@ -983,14 +1002,22 @@ func (s *DevicesLongRunningOperationMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// DevicesLongRunningOperationResponse: Long running operation response. +// DevicesLongRunningOperationResponse: Tracks the status of a +// long-running operation to claim, unclaim, or attach +// metadata to devices. To learn more, read +// [Long‑running batch +// operations](/zero-touch/guides/how-it-works#operations). type DevicesLongRunningOperationResponse struct { - // PerDeviceStatus: Processing status for each device. - // One `PerDeviceStatus` per device. The order is the same as in your - // requests. + // PerDeviceStatus: The processing status for each device in the + // operation. + // One `PerDeviceStatus` per device. The list order matches the items in + // the + // original request. PerDeviceStatus []*OperationPerDevice `json:"perDeviceStatus,omitempty"` - // SuccessCount: Number of succeesfully processed ones. + // SuccessCount: A summary of how many items in the operation the server + // processed + // successfully. Updated as the operation progresses. SuccessCount int64 `json:"successCount,omitempty"` // ForceSendFields is a list of field names (e.g. "PerDeviceStatus") to @@ -1088,13 +1115,15 @@ type Empty struct { // FindDevicesByDeviceIdentifierRequest: Request to find devices. type FindDevicesByDeviceIdentifierRequest struct { - // DeviceIdentifier: The device identifier to search. + // DeviceIdentifier: Required. The device identifier to search for. DeviceIdentifier *DeviceIdentifier `json:"deviceIdentifier,omitempty"` - // Limit: Number of devices to show. + // Limit: Required. The maximum number of devices to show in a page of + // results. Must + // be between 1 and 100 inclusive. Limit int64 `json:"limit,omitempty,string"` - // PageToken: Page token. + // PageToken: A token specifying which result page to return. 
PageToken string `json:"pageToken,omitempty"` // ForceSendFields is a list of field names (e.g. "DeviceIdentifier") to @@ -1127,7 +1156,9 @@ type FindDevicesByDeviceIdentifierResponse struct { // Devices: Found devices. Devices []*Device `json:"devices,omitempty"` - // NextPageToken: Page token of the next page. + // NextPageToken: A token used to access the next page of results. + // Omitted if no further + // results are available. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1159,16 +1190,19 @@ func (s *FindDevicesByDeviceIdentifierResponse) MarshalJSON() ([]byte, error) { // FindDevicesByOwnerRequest: Request to find devices by customers. type FindDevicesByOwnerRequest struct { - // CustomerId: List of customer IDs to search for. + // CustomerId: Required. The list of customer IDs to search for. CustomerId googleapi.Int64s `json:"customerId,omitempty"` - // Limit: The number of devices to show in the result. + // Limit: Required. The maximum number of devices to show in a page of + // results. Must + // be between 1 and 100 inclusive. Limit int64 `json:"limit,omitempty,string"` - // PageToken: Page token. + // PageToken: A token specifying which result page to return. PageToken string `json:"pageToken,omitempty"` - // SectionType: The section type. + // SectionType: Required. The section type of the device's provisioning + // record. // // Possible values: // "SECTION_TYPE_UNSPECIFIED" - Unspecified section type. @@ -1200,10 +1234,12 @@ func (s *FindDevicesByOwnerRequest) MarshalJSON() ([]byte, error) { // FindDevicesByOwnerResponse: Response containing found devices. type FindDevicesByOwnerResponse struct { - // Devices: Devices found. + // Devices: The customer's devices. Devices []*Device `json:"devices,omitempty"` - // NextPageToken: Page token of the next page. + // NextPageToken: A token used to access the next page of + // results. 
+ // Omitted if no further results are available. NextPageToken string `json:"nextPageToken,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1236,7 +1272,7 @@ func (s *FindDevicesByOwnerResponse) MarshalJSON() ([]byte, error) { // ListCustomersResponse: Response message of all customers related to // this partner. type ListCustomersResponse struct { - // Customers: List of customers related to this partner. + // Customers: List of customers related to this reseller partner. Customers []*Company `json:"customers,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1327,18 +1363,23 @@ func (s *Operation) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// OperationPerDevice: Operation the server received for every device. +// OperationPerDevice: A task for each device in the operation. +// Corresponds to each device +// change in the request. type OperationPerDevice struct { - // Claim: Request to claim a device. + // Claim: A copy of the original device-claim request received by the + // server. Claim *PartnerClaim `json:"claim,omitempty"` - // Result: Processing result for every device. + // Result: The processing result for each device. Result *PerDeviceStatusInBatch `json:"result,omitempty"` - // Unclaim: Request to unclaim a device. + // Unclaim: A copy of the original device-unclaim request received by + // the server. Unclaim *PartnerUnclaim `json:"unclaim,omitempty"` - // UpdateMetadata: Request to set metadata for a device. + // UpdateMetadata: A copy of the original metadata-update request + // received by the server. UpdateMetadata *UpdateMetadataArguments `json:"updateMetadata,omitempty"` // ForceSendFields is a list of field names (e.g. "Claim") to @@ -1366,16 +1407,19 @@ func (s *OperationPerDevice) MarshalJSON() ([]byte, error) { // PartnerClaim: Identifies one claim request. 
type PartnerClaim struct { - // CustomerId: Customer ID to claim for. + // CustomerId: Required. The ID of the customer for whom the device is + // being claimed. CustomerId int64 `json:"customerId,omitempty,string"` - // DeviceIdentifier: Device identifier of the device. + // DeviceIdentifier: Required. Device identifier of the device. DeviceIdentifier *DeviceIdentifier `json:"deviceIdentifier,omitempty"` - // DeviceMetadata: Metadata to set at claim. + // DeviceMetadata: Required. The metadata to attach to the device at + // claim. DeviceMetadata *DeviceMetadata `json:"deviceMetadata,omitempty"` - // SectionType: Section type to claim. + // SectionType: Required. The section type of the device's provisioning + // record. // // Possible values: // "SECTION_TYPE_UNSPECIFIED" - Unspecified section type. @@ -1413,7 +1457,8 @@ type PartnerUnclaim struct { // DeviceIdentifier: Device identifier of the device. DeviceIdentifier *DeviceIdentifier `json:"deviceIdentifier,omitempty"` - // SectionType: Section type to unclaim. + // SectionType: Required. The section type of the device's provisioning + // record. // // Possible values: // "SECTION_TYPE_UNSPECIFIED" - Unspecified section type. @@ -1443,18 +1488,20 @@ func (s *PartnerUnclaim) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// PerDeviceStatusInBatch: Stores the processing result for each device. +// PerDeviceStatusInBatch: Captures the processing status for each +// device in the operation. type PerDeviceStatusInBatch struct { - // DeviceId: Device ID of the device if process succeeds. + // DeviceId: If processing succeeds, the device ID of the device. DeviceId int64 `json:"deviceId,omitempty,string"` - // ErrorIdentifier: Error identifier. + // ErrorIdentifier: If processing fails, the error type. ErrorIdentifier string `json:"errorIdentifier,omitempty"` - // ErrorMessage: Error message. 
+ // ErrorMessage: If processing fails, a developer message explaining + // what went wrong. ErrorMessage string `json:"errorMessage,omitempty"` - // Status: Process result. + // Status: The result status of the device after processing. // // Possible values: // "SINGLE_DEVICE_STATUS_UNSPECIFIED" - Invalid code. Shouldn't be @@ -1625,7 +1672,8 @@ type UnclaimDeviceRequest struct { // this device. DeviceIdentifier *DeviceIdentifier `json:"deviceIdentifier,omitempty"` - // SectionType: The section type to unclaim for. + // SectionType: Required. The section type of the device's provisioning + // record. // // Possible values: // "SECTION_TYPE_UNSPECIFIED" - Unspecified section type. @@ -1658,7 +1706,7 @@ func (s *UnclaimDeviceRequest) MarshalJSON() ([]byte, error) { // UnclaimDevicesRequest: Request to unclaim devices asynchronously in // batch. type UnclaimDevicesRequest struct { - // Unclaims: List of devices to unclaim. + // Unclaims: Required. The list of devices to unclaim. Unclaims []*PartnerUnclaim `json:"unclaims,omitempty"` // ForceSendFields is a list of field names (e.g. "Unclaims") to @@ -1687,7 +1735,7 @@ func (s *UnclaimDevicesRequest) MarshalJSON() ([]byte, error) { // UpdateDeviceMetadataInBatchRequest: Request to update device metadata // in batch. type UpdateDeviceMetadataInBatchRequest struct { - // Updates: List of metadata updates. + // Updates: Required. The list of metadata updates. Updates []*UpdateMetadataArguments `json:"updates,omitempty"` // ForceSendFields is a list of field names (e.g. "Updates") to @@ -1715,7 +1763,7 @@ func (s *UpdateDeviceMetadataInBatchRequest) MarshalJSON() ([]byte, error) { // UpdateDeviceMetadataRequest: Request to set metadata for a device. type UpdateDeviceMetadataRequest struct { - // DeviceMetadata: The metdata to set. + // DeviceMetadata: Required. The metdata to attach to the device. DeviceMetadata *DeviceMetadata `json:"deviceMetadata,omitempty"` // ForceSendFields is a list of field names (e.g. 
"DeviceMetadata") to @@ -1750,7 +1798,7 @@ type UpdateMetadataArguments struct { // DeviceIdentifier: Device identifier. DeviceIdentifier *DeviceIdentifier `json:"deviceIdentifier,omitempty"` - // DeviceMetadata: The metadata to update. + // DeviceMetadata: Required. The metadata to update. DeviceMetadata *DeviceMetadata `json:"deviceMetadata,omitempty"` // ForceSendFields is a list of field names (e.g. "DeviceId") to @@ -3892,7 +3940,7 @@ func (c *PartnersCustomersListCall) Do(opts ...googleapi.CallOption) (*ListCusto // ], // "parameters": { // "partnerId": { - // "description": "The ID of the partner.", + // "description": "Required. The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -3919,7 +3967,10 @@ type PartnersDevicesClaimCall struct { header_ http.Header } -// Claim: Claim the device identified by device identifier. +// Claim: Claims a device for a customer and adds it to zero-touch +// enrollment. If the +// device is already claimed by another customer, the call returns an +// error. func (r *PartnersDevicesService) Claim(partnerId int64, claimdevicerequest *ClaimDeviceRequest) *PartnersDevicesClaimCall { c := &PartnersDevicesClaimCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -4013,7 +4064,7 @@ func (c *PartnersDevicesClaimCall) Do(opts ...googleapi.CallOption) (*ClaimDevic } return ret, nil // { - // "description": "Claim the device identified by device identifier.", + // "description": "Claims a device for a customer and adds it to zero-touch enrollment. 
If the\ndevice is already claimed by another customer, the call returns an error.", // "flatPath": "v1/partners/{partnersId}/devices:claim", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.claim", @@ -4022,7 +4073,7 @@ func (c *PartnersDevicesClaimCall) Do(opts ...googleapi.CallOption) (*ClaimDevic // ], // "parameters": { // "partnerId": { - // "description": "ID of the partner.", + // "description": "Required. The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4052,7 +4103,11 @@ type PartnersDevicesClaimAsyncCall struct { header_ http.Header } -// ClaimAsync: Claim devices asynchronously. +// ClaimAsync: Claims a batch of devices for a customer asynchronously. +// Adds the devices +// to zero-touch enrollment. To learn more, read [Long‑running +// batch +// operations](/zero-touch/guides/how-it-works#operations). func (r *PartnersDevicesService) ClaimAsync(partnerId int64, claimdevicesrequest *ClaimDevicesRequest) *PartnersDevicesClaimAsyncCall { c := &PartnersDevicesClaimAsyncCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -4146,7 +4201,7 @@ func (c *PartnersDevicesClaimAsyncCall) Do(opts ...googleapi.CallOption) (*Opera } return ret, nil // { - // "description": "Claim devices asynchronously.", + // "description": "Claims a batch of devices for a customer asynchronously. Adds the devices\nto zero-touch enrollment. To learn more, read [Long‑running batch\noperations](/zero-touch/guides/how-it-works#operations).", // "flatPath": "v1/partners/{partnersId}/devices:claimAsync", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.claimAsync", @@ -4155,7 +4210,7 @@ func (c *PartnersDevicesClaimAsyncCall) Do(opts ...googleapi.CallOption) (*Opera // ], // "parameters": { // "partnerId": { - // "description": "Partner ID.", + // "description": "Required. 
The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4185,7 +4240,8 @@ type PartnersDevicesFindByIdentifierCall struct { header_ http.Header } -// FindByIdentifier: Find devices by device identifier. +// FindByIdentifier: Finds devices by hardware identifiers, such as +// IMEI. func (r *PartnersDevicesService) FindByIdentifier(partnerId int64, finddevicesbydeviceidentifierrequest *FindDevicesByDeviceIdentifierRequest) *PartnersDevicesFindByIdentifierCall { c := &PartnersDevicesFindByIdentifierCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -4280,7 +4336,7 @@ func (c *PartnersDevicesFindByIdentifierCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Find devices by device identifier.", + // "description": "Finds devices by hardware identifiers, such as IMEI.", // "flatPath": "v1/partners/{partnersId}/devices:findByIdentifier", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.findByIdentifier", @@ -4289,7 +4345,7 @@ func (c *PartnersDevicesFindByIdentifierCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "partnerId": { - // "description": "ID of the partner.", + // "description": "Required. The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4340,7 +4396,13 @@ type PartnersDevicesFindByOwnerCall struct { header_ http.Header } -// FindByOwner: Find devices by ownership. +// FindByOwner: Finds devices claimed for customers. The results only +// contain devices +// registered to the reseller that's identified by the `partnerId` +// argument. +// The customer's devices purchased from other resellers don't appear in +// the +// results. 
func (r *PartnersDevicesService) FindByOwner(partnerId int64, finddevicesbyownerrequest *FindDevicesByOwnerRequest) *PartnersDevicesFindByOwnerCall { c := &PartnersDevicesFindByOwnerCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -4434,7 +4496,7 @@ func (c *PartnersDevicesFindByOwnerCall) Do(opts ...googleapi.CallOption) (*Find } return ret, nil // { - // "description": "Find devices by ownership.", + // "description": "Finds devices claimed for customers. The results only contain devices\nregistered to the reseller that's identified by the `partnerId` argument.\nThe customer's devices purchased from other resellers don't appear in the\nresults.", // "flatPath": "v1/partners/{partnersId}/devices:findByOwner", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.findByOwner", @@ -4443,7 +4505,7 @@ func (c *PartnersDevicesFindByOwnerCall) Do(opts ...googleapi.CallOption) (*Find // ], // "parameters": { // "partnerId": { - // "description": "ID of the partner.", + // "description": "Required. The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4494,7 +4556,7 @@ type PartnersDevicesGetCall struct { header_ http.Header } -// Get: Get a device. +// Get: Gets a device. 
func (r *PartnersDevicesService) Get(name string) *PartnersDevicesGetCall { c := &PartnersDevicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -4595,7 +4657,7 @@ func (c *PartnersDevicesGetCall) Do(opts ...googleapi.CallOption) (*Device, erro } return ret, nil // { - // "description": "Get a device.", + // "description": "Gets a device.", // "flatPath": "v1/partners/{partnersId}/devices/{devicesId}", // "httpMethod": "GET", // "id": "androiddeviceprovisioning.partners.devices.get", @@ -4604,7 +4666,7 @@ func (c *PartnersDevicesGetCall) Do(opts ...googleapi.CallOption) (*Device, erro // ], // "parameters": { // "name": { - // "description": "Resource name in `partners/[PARTNER_ID]/devices/[DEVICE_ID]`.", + // "description": "Required. The device API resource name in the format\n`partners/[PARTNER_ID]/devices/[DEVICE_ID]`.", // "location": "path", // "pattern": "^partners/[^/]+/devices/[^/]+$", // "required": true, @@ -4631,7 +4693,7 @@ type PartnersDevicesMetadataCall struct { header_ http.Header } -// Metadata: Update the metadata. +// Metadata: Updates reseller metadata associated with the device. 
func (r *PartnersDevicesService) Metadata(metadataOwnerId int64, deviceId int64, updatedevicemetadatarequest *UpdateDeviceMetadataRequest) *PartnersDevicesMetadataCall { c := &PartnersDevicesMetadataCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.metadataOwnerId = metadataOwnerId @@ -4727,7 +4789,7 @@ func (c *PartnersDevicesMetadataCall) Do(opts ...googleapi.CallOption) (*DeviceM } return ret, nil // { - // "description": "Update the metadata.", + // "description": "Updates reseller metadata associated with the device.", // "flatPath": "v1/partners/{partnersId}/devices/{devicesId}/metadata", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.metadata", @@ -4737,7 +4799,7 @@ func (c *PartnersDevicesMetadataCall) Do(opts ...googleapi.CallOption) (*DeviceM // ], // "parameters": { // "deviceId": { - // "description": "ID of the partner.", + // "description": "Required. The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4745,7 +4807,7 @@ func (c *PartnersDevicesMetadataCall) Do(opts ...googleapi.CallOption) (*DeviceM // "type": "string" // }, // "metadataOwnerId": { - // "description": "The owner of the newly set metadata. Set this to the partner ID.", + // "description": "Required. The owner of the newly set metadata. Set this to the partner ID.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4775,8 +4837,9 @@ type PartnersDevicesUnclaimCall struct { header_ http.Header } -// Unclaim: Unclaim the device identified by the `device_id` or the -// `deviceIdentifier`. +// Unclaim: Unclaims a device from a customer and removes it from +// zero-touch +// enrollment. 
func (r *PartnersDevicesService) Unclaim(partnerId int64, unclaimdevicerequest *UnclaimDeviceRequest) *PartnersDevicesUnclaimCall { c := &PartnersDevicesUnclaimCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -4870,7 +4933,7 @@ func (c *PartnersDevicesUnclaimCall) Do(opts ...googleapi.CallOption) (*Empty, e } return ret, nil // { - // "description": "Unclaim the device identified by the `device_id` or the `deviceIdentifier`.", + // "description": "Unclaims a device from a customer and removes it from zero-touch\nenrollment.", // "flatPath": "v1/partners/{partnersId}/devices:unclaim", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.unclaim", @@ -4879,7 +4942,7 @@ func (c *PartnersDevicesUnclaimCall) Do(opts ...googleapi.CallOption) (*Empty, e // ], // "parameters": { // "partnerId": { - // "description": "ID of the partner.", + // "description": "Required. The ID of the reseller partner.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -4909,7 +4972,12 @@ type PartnersDevicesUnclaimAsyncCall struct { header_ http.Header } -// UnclaimAsync: Unclaim devices asynchronously. +// UnclaimAsync: Unclaims a batch of devices for a customer +// asynchronously. Removes the +// devices from zero-touch enrollment. To learn more, read +// [Long‑running +// batch +// operations](/zero-touch/guides/how-it-works#operations). func (r *PartnersDevicesService) UnclaimAsync(partnerId int64, unclaimdevicesrequest *UnclaimDevicesRequest) *PartnersDevicesUnclaimAsyncCall { c := &PartnersDevicesUnclaimAsyncCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -5003,7 +5071,7 @@ func (c *PartnersDevicesUnclaimAsyncCall) Do(opts ...googleapi.CallOption) (*Ope } return ret, nil // { - // "description": "Unclaim devices asynchronously.", + // "description": "Unclaims a batch of devices for a customer asynchronously. Removes the\ndevices from zero-touch enrollment. 
To learn more, read [Long‑running batch\noperations](/zero-touch/guides/how-it-works#operations).", // "flatPath": "v1/partners/{partnersId}/devices:unclaimAsync", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.unclaimAsync", @@ -5012,7 +5080,7 @@ func (c *PartnersDevicesUnclaimAsyncCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "partnerId": { - // "description": "Partner ID.", + // "description": "Required. The reseller partner ID.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", @@ -5042,7 +5110,13 @@ type PartnersDevicesUpdateMetadataAsyncCall struct { header_ http.Header } -// UpdateMetadataAsync: Set metadata in batch asynchronously. +// UpdateMetadataAsync: Updates the reseller metadata attached to a +// batch of devices. This method +// updates devices asynchronously and returns an `Operation` that can be +// used +// to track progress. Read [Long‑running +// batch +// operations](/zero-touch/guides/how-it-works#operations). func (r *PartnersDevicesService) UpdateMetadataAsync(partnerId int64, updatedevicemetadatainbatchrequest *UpdateDeviceMetadataInBatchRequest) *PartnersDevicesUpdateMetadataAsyncCall { c := &PartnersDevicesUpdateMetadataAsyncCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.partnerId = partnerId @@ -5136,7 +5210,7 @@ func (c *PartnersDevicesUpdateMetadataAsyncCall) Do(opts ...googleapi.CallOption } return ret, nil // { - // "description": "Set metadata in batch asynchronously.", + // "description": "Updates the reseller metadata attached to a batch of devices. This method\nupdates devices asynchronously and returns an `Operation` that can be used\nto track progress. 
Read [Long‑running batch\noperations](/zero-touch/guides/how-it-works#operations).", // "flatPath": "v1/partners/{partnersId}/devices:updateMetadataAsync", // "httpMethod": "POST", // "id": "androiddeviceprovisioning.partners.devices.updateMetadataAsync", @@ -5145,7 +5219,7 @@ func (c *PartnersDevicesUpdateMetadataAsyncCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "partnerId": { - // "description": "Partner ID.", + // "description": "Required. The reseller partner ID.", // "format": "int64", // "location": "path", // "pattern": "^[^/]+$", diff --git a/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-api.json b/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-api.json index 9223bcf0f..bf34eec87 100644 --- a/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-api.json +++ b/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-api.json @@ -766,7 +766,7 @@ } } }, - "revision": "20180220", + "revision": "20180305", "rootUrl": "https://androidmanagement.googleapis.com/", "schemas": { "AlwaysOnVpnPackage": { @@ -986,6 +986,10 @@ "Reboot the device. Only supported on API level 24+." ], "type": "string" + }, + "userName": { + "description": "The resource name of the user that owns the device in the form enterprises/{enterpriseId}/users/{userId}. This is automatically generated by the server based on the device the command is sent to.", + "type": "string" } }, "type": "object" @@ -2106,6 +2110,24 @@ }, "type": "array" }, + "appAutoUpdatePolicy": { + "description": "The auto update policy value. Specifies whether the user is given a choice to configure the app update policy, or otherwise contains the enforced update policy", + "enum": [ + "APP_AUTO_UPDATE_POLICY_UNSPECIFIED", + "CHOICE_TO_THE_USER", + "NEVER", + "WIFI_ONLY", + "ALWAYS" + ], + "enumDescriptions": [ + "The auto-update policy is not set. 
Same as giving auto-update policy choice to the user.", + "The user can control auto-updates.", + "Apps are never auto-updated.", + "Apps are auto-updated over Wi-Fi only.", + "Apps are auto-updated at any time. Data charges may apply." + ], + "type": "string" + }, "applications": { "description": "Policy applied to apps.", "items": { diff --git a/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-gen.go b/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-gen.go index 66b47b14d..880a03768 100644 --- a/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-gen.go +++ b/vendor/google.golang.org/api/androidmanagement/v1/androidmanagement-gen.go @@ -455,6 +455,11 @@ type Command struct { // "REBOOT" - Reboot the device. Only supported on API level 24+. Type string `json:"type,omitempty"` + // UserName: The resource name of the user that owns the device in the + // form enterprises/{enterpriseId}/users/{userId}. This is automatically + // generated by the server based on the device the command is sent to. + UserName string `json:"userName,omitempty"` + // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1954,6 +1959,20 @@ type Policy struct { // "BETA" - The beta track, which provides the latest beta release. AndroidDevicePolicyTracks []string `json:"androidDevicePolicyTracks,omitempty"` + // AppAutoUpdatePolicy: The auto update policy value. Specifies whether + // the user is given a choice to configure the app update policy, or + // otherwise contains the enforced update policy + // + // Possible values: + // "APP_AUTO_UPDATE_POLICY_UNSPECIFIED" - The auto-update policy is + // not set. Same as giving auto-update policy choice to the user. + // "CHOICE_TO_THE_USER" - The user can control auto-updates. + // "NEVER" - Apps are never auto-updated. 
+ // "WIFI_ONLY" - Apps are auto-updated over Wi-Fi only. + // "ALWAYS" - Apps are auto-updated at any time. Data charges may + // apply. + AppAutoUpdatePolicy string `json:"appAutoUpdatePolicy,omitempty"` + // Applications: Policy applied to apps. Applications []*ApplicationPolicy `json:"applications,omitempty"` diff --git a/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-api.json b/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-api.json index 34ed44449..f84bd86cf 100644 --- a/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-api.json +++ b/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-api.json @@ -15,7 +15,7 @@ "description": "Lets Android application developers access their Google Play accounts.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/android-publisher", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/JHo8f1I6kYzk10q6NwBbUC9DMxw\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/430OMYxRvf9sCM_VRkUKZWSfR7U\"", "icons": { "x16": "https://www.google.com/images/icons/product/android-16.png", "x32": "https://www.google.com/images/icons/product/android-32.png" @@ -1519,9 +1519,9 @@ "type": "string" }, "track": { - "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "location": "path", - "pattern": "(alpha|beta|production|rollout)", + "pattern": "(alpha|beta|production|rollout|internal)", "required": true, "type": "string" } @@ -1556,9 +1556,9 @@ "type": "string" }, "track": { - "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "The track to read or modify. 
Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "location": "path", - "pattern": "(alpha|beta|production|rollout)", + "pattern": "(alpha|beta|production|rollout|internal)", "required": true, "type": "string" } @@ -1596,9 +1596,9 @@ "type": "string" }, "track": { - "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "location": "path", - "pattern": "(alpha|beta|production|rollout)", + "pattern": "(alpha|beta|production|rollout|internal)", "required": true, "type": "string" } @@ -1641,9 +1641,9 @@ "type": "string" }, "track": { - "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "location": "path", - "pattern": "(alpha|beta|production|rollout)", + "pattern": "(alpha|beta|production|rollout|internal)", "required": true, "type": "string" } @@ -1709,9 +1709,9 @@ "type": "string" }, "track": { - "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "location": "path", - "pattern": "(alpha|beta|production|rollout)", + "pattern": "(alpha|beta|production|rollout|internal)", "required": true, "type": "string" } @@ -1750,9 +1750,9 @@ "type": "string" }, "track": { - "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "The track to read or modify. 
Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "location": "path", - "pattern": "(alpha|beta|production|rollout)", + "pattern": "(alpha|beta|production|rollout|internal)", "required": true, "type": "string" } @@ -2411,7 +2411,7 @@ } } }, - "revision": "20180211", + "revision": "20180319", "rootUrl": "https://www.googleapis.com/", "schemas": { "Apk": { @@ -3237,11 +3237,23 @@ "description": "A developer-specified string that contains supplemental information about an order.", "type": "string" }, + "emailAddress": { + "description": "The email address of the user when the subscription was purchased. Only present for purchases made with 'Subscribe with Google'.", + "type": "string" + }, "expiryTimeMillis": { "description": "Time at which the subscription will expire, in milliseconds since the Epoch.", "format": "int64", "type": "string" }, + "familyName": { + "description": "The family name of the user when the subscription was purchased. Only present for purchases made with 'Subscribe with Google'.", + "type": "string" + }, + "givenName": { + "description": "The given name of the user when the subscription was purchased. Only present for purchases made with 'Subscribe with Google'.", + "type": "string" + }, "kind": { "default": "androidpublisher#subscriptionPurchase", "description": "This kind represents a subscriptionPurchase object in the androidpublisher service.", @@ -3269,6 +3281,14 @@ "description": "ISO 4217 currency code for the subscription price. For example, if the price is specified in British pounds sterling, price_currency_code is \"GBP\".", "type": "string" }, + "profileId": { + "description": "The profile id of the user when the subscription was purchased. Only present for purchases made with 'Subscribe with Google'.", + "type": "string" + }, + "profileName": { + "description": "The profile name of the user when the subscription was purchased. 
Only present for purchases made with 'Subscribe with Google'.", + "type": "string" + }, "purchaseType": { "description": "The type of purchase of the subscription. This field is only set if this purchase was not made using the standard in-app billing flow. Possible values are: \n- Test (i.e. purchased from a license testing account)", "format": "int32", @@ -3356,7 +3376,7 @@ "id": "Track", "properties": { "track": { - "description": "Identifier for this track. One of \"alpha\", \"beta\", \"production\" or \"rollout\".", + "description": "Identifier for this track. One of \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", "type": "string" }, "userFraction": { diff --git a/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go b/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go index f6829c2b1..1127e1149 100644 --- a/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go +++ b/vendor/google.golang.org/api/androidpublisher/v2/androidpublisher-gen.go @@ -1815,10 +1815,25 @@ type SubscriptionPurchase struct { // supplemental information about an order. DeveloperPayload string `json:"developerPayload,omitempty"` + // EmailAddress: The email address of the user when the subscription was + // purchased. Only present for purchases made with 'Subscribe with + // Google'. + EmailAddress string `json:"emailAddress,omitempty"` + // ExpiryTimeMillis: Time at which the subscription will expire, in // milliseconds since the Epoch. ExpiryTimeMillis int64 `json:"expiryTimeMillis,omitempty,string"` + // FamilyName: The family name of the user when the subscription was + // purchased. Only present for purchases made with 'Subscribe with + // Google'. + FamilyName string `json:"familyName,omitempty"` + + // GivenName: The given name of the user when the subscription was + // purchased. Only present for purchases made with 'Subscribe with + // Google'. 
+ GivenName string `json:"givenName,omitempty"` + // Kind: This kind represents a subscriptionPurchase object in the // androidpublisher service. Kind string `json:"kind,omitempty"` @@ -1859,6 +1874,16 @@ type SubscriptionPurchase struct { // price_currency_code is "GBP". PriceCurrencyCode string `json:"priceCurrencyCode,omitempty"` + // ProfileId: The profile id of the user when the subscription was + // purchased. Only present for purchases made with 'Subscribe with + // Google'. + ProfileId string `json:"profileId,omitempty"` + + // ProfileName: The profile name of the user when the subscription was + // purchased. Only present for purchases made with 'Subscribe with + // Google'. + ProfileName string `json:"profileName,omitempty"` + // PurchaseType: The type of purchase of the subscription. This field is // only set if this purchase was not made using the standard in-app // billing flow. Possible values are: @@ -2052,7 +2077,7 @@ func (s *TokenPagination) MarshalJSON() ([]byte, error) { type Track struct { // Track: Identifier for this track. One of "alpha", "beta", - // "production" or "rollout". + // "production", "rollout" or "internal". Track string `json:"track,omitempty"` UserFraction float64 `json:"userFraction,omitempty"` @@ -7556,9 +7581,9 @@ func (c *EditsTestersGetCall) Do(opts ...googleapi.CallOption) (*Testers, error) // "type": "string" // }, // "track": { - // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + // "description": "The track to read or modify. 
Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", // "location": "path", - // "pattern": "(alpha|beta|production|rollout)", + // "pattern": "(alpha|beta|production|rollout|internal)", // "required": true, // "type": "string" // } @@ -7706,9 +7731,9 @@ func (c *EditsTestersPatchCall) Do(opts ...googleapi.CallOption) (*Testers, erro // "type": "string" // }, // "track": { - // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", // "location": "path", - // "pattern": "(alpha|beta|production|rollout)", + // "pattern": "(alpha|beta|production|rollout|internal)", // "required": true, // "type": "string" // } @@ -7859,9 +7884,9 @@ func (c *EditsTestersUpdateCall) Do(opts ...googleapi.CallOption) (*Testers, err // "type": "string" // }, // "track": { - // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", // "location": "path", - // "pattern": "(alpha|beta|production|rollout)", + // "pattern": "(alpha|beta|production|rollout|internal)", // "required": true, // "type": "string" // } @@ -8021,9 +8046,9 @@ func (c *EditsTracksGetCall) Do(opts ...googleapi.CallOption) (*Track, error) { // "type": "string" // }, // "track": { - // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + // "description": "The track to read or modify. 
Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", // "location": "path", - // "pattern": "(alpha|beta|production|rollout)", + // "pattern": "(alpha|beta|production|rollout|internal)", // "required": true, // "type": "string" // } @@ -8322,9 +8347,9 @@ func (c *EditsTracksPatchCall) Do(opts ...googleapi.CallOption) (*Track, error) // "type": "string" // }, // "track": { - // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", // "location": "path", - // "pattern": "(alpha|beta|production|rollout)", + // "pattern": "(alpha|beta|production|rollout|internal)", // "required": true, // "type": "string" // } @@ -8478,9 +8503,9 @@ func (c *EditsTracksUpdateCall) Do(opts ...googleapi.CallOption) (*Track, error) // "type": "string" // }, // "track": { - // "description": "The track to read or modify. Acceptable values are: \"alpha\", \"beta\", \"production\" or \"rollout\".", + // "description": "The track to read or modify. 
Acceptable values are: \"alpha\", \"beta\", \"production\", \"rollout\" or \"internal\".", // "location": "path", - // "pattern": "(alpha|beta|production|rollout)", + // "pattern": "(alpha|beta|production|rollout|internal)", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/api-list.json b/vendor/google.golang.org/api/api-list.json index 6e55b788e..f82819f32 100644 --- a/vendor/google.golang.org/api/api-list.json +++ b/vendor/google.golang.org/api/api-list.json @@ -791,7 +791,7 @@ "id": "cloudresourcemanager:v1", "name": "cloudresourcemanager", "version": "v1", - "title": "Google Cloud Resource Manager API", + "title": "Cloud Resource Manager API", "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v1", "icons": { @@ -799,14 +799,14 @@ "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" }, "documentationLink": "https://cloud.google.com/resource-manager", - "preferred": true + "preferred": false }, { "kind": "discovery#directoryItem", "id": "cloudresourcemanager:v1beta1", "name": "cloudresourcemanager", "version": "v1beta1", - "title": "Google Cloud Resource Manager API", + "title": "Cloud Resource Manager API", "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v1beta1", "icons": { @@ -816,12 +816,27 @@ "documentationLink": "https://cloud.google.com/resource-manager", "preferred": false }, + { + "kind": "discovery#directoryItem", + "id": "cloudresourcemanager:v2", + "name": "cloudresourcemanager", + "version": "v2", + "title": "Cloud Resource Manager API", + "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project 
metadata.", + "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/resource-manager", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "cloudresourcemanager:v2beta1", "name": "cloudresourcemanager", "version": "v2beta1", - "title": "Google Cloud Resource Manager API", + "title": "Cloud Resource Manager API", "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", "discoveryRestUrl": "https://cloudresourcemanager.googleapis.com/$discovery/rest?version=v2beta1", "icons": { @@ -906,70 +921,6 @@ "documentationLink": "https://cloud.google.com/trace", "preferred": true }, - { - "kind": "discovery#directoryItem", - "id": "clouduseraccounts:alpha", - "name": "clouduseraccounts", - "version": "alpha", - "title": "Cloud User Accounts API", - "description": "Creates and manages users and groups for accessing Google Compute Engine virtual machines.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/clouduseraccounts/alpha/rest", - "discoveryLink": "./apis/clouduseraccounts/alpha/rest", - "icons": { - "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", - "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" - }, - "documentationLink": "https://cloud.google.com/compute/docs/access/user-accounts/api/latest/", - "preferred": false - }, - { - "kind": "discovery#directoryItem", - "id": "clouduseraccounts:beta", - "name": "clouduseraccounts", - "version": "beta", - "title": "Cloud User Accounts API", - "description": "Creates and manages users and groups for accessing Google Compute Engine virtual machines.", - "discoveryRestUrl": 
"https://www.googleapis.com/discovery/v1/apis/clouduseraccounts/beta/rest", - "discoveryLink": "./apis/clouduseraccounts/beta/rest", - "icons": { - "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", - "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" - }, - "documentationLink": "https://cloud.google.com/compute/docs/access/user-accounts/api/latest/", - "preferred": false - }, - { - "kind": "discovery#directoryItem", - "id": "clouduseraccounts:vm_alpha", - "name": "clouduseraccounts", - "version": "vm_alpha", - "title": "Cloud User Accounts API", - "description": "Creates and manages users and groups for accessing Google Compute Engine virtual machines.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/clouduseraccounts/vm_alpha/rest", - "discoveryLink": "./apis/clouduseraccounts/vm_alpha/rest", - "icons": { - "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", - "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" - }, - "documentationLink": "https://cloud.google.com/compute/docs/access/user-accounts/api/latest/", - "preferred": true - }, - { - "kind": "discovery#directoryItem", - "id": "clouduseraccounts:vm_beta", - "name": "clouduseraccounts", - "version": "vm_beta", - "title": "Cloud User Accounts API", - "description": "Creates and manages users and groups for accessing Google Compute Engine virtual machines.", - "discoveryRestUrl": "https://www.googleapis.com/discovery/v1/apis/clouduseraccounts/vm_beta/rest", - "discoveryLink": "./apis/clouduseraccounts/vm_beta/rest", - "icons": { - "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", - "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" - }, - "documentationLink": "https://cloud.google.com/compute/docs/access/user-accounts/api/latest/", - "preferred": false - }, { "kind": "discovery#directoryItem", "id": "compute:alpha", @@ -1266,6 +1217,21 @@ 
"documentationLink": "https://developers.google.com/doubleclick-advertisers/", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "dialogflow:v2", + "name": "dialogflow", + "version": "v2", + "title": "Dialogflow API", + "description": "An end-to-end development suite for conversational interfaces (e.g., chatbots, voice-powered apps and devices).", + "discoveryRestUrl": "https://dialogflow.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dialogflow-enterprise/", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "dialogflow:v2beta1", @@ -1279,7 +1245,7 @@ "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" }, "documentationLink": "https://cloud.google.com/dialogflow-enterprise/", - "preferred": true + "preferred": false }, { "kind": "discovery#directoryItem", @@ -1312,13 +1278,28 @@ "documentationLink": "https://developers.google.com/discovery/", "preferred": true }, + { + "kind": "discovery#directoryItem", + "id": "dlp:v2", + "name": "dlp", + "version": "v2", + "title": "Cloud Data Loss Prevention (DLP) API", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", + "discoveryRestUrl": "https://dlp.googleapis.com/$discovery/rest?version=v2", + "icons": { + "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", + "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" + }, + "documentationLink": "https://cloud.google.com/dlp/docs/", + "preferred": true + }, { "kind": "discovery#directoryItem", "id": "dlp:v2beta1", "name": "dlp", "version": "v2beta1", - "title": "DLP API", - "description": "The Google Data Loss 
Prevention API provides methods for detection of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", + "title": "Cloud Data Loss Prevention (DLP) API", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", "discoveryRestUrl": "https://dlp.googleapis.com/$discovery/rest?version=v2beta1", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", @@ -1332,15 +1313,15 @@ "id": "dlp:v2beta2", "name": "dlp", "version": "v2beta2", - "title": "DLP API", - "description": "The Google Data Loss Prevention API provides methods for detection of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", + "title": "Cloud Data Loss Prevention (DLP) API", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", "discoveryRestUrl": "https://dlp.googleapis.com/$discovery/rest?version=v2beta2", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" }, "documentationLink": "https://cloud.google.com/dlp/docs/", - "preferred": true + "preferred": false }, { "kind": "discovery#directoryItem", @@ -2014,6 +1995,7 @@ "x16": "https://www.google.com/images/icons/product/pagespeed-16.png", "x32": "https://www.google.com/images/icons/product/pagespeed-32.png" }, + "documentationLink": "https://developers.google.com/speed/docs/insights/v4/getting-started", "preferred": true }, { @@ -2610,7 +2592,7 @@ "id": "speech:v1", "name": "speech", "version": "v1", - "title": "Google Cloud Speech API", + "title": "Cloud Speech API", "description": "Converts audio to text by applying powerful neural network models.", "discoveryRestUrl": 
"https://speech.googleapis.com/$discovery/rest?version=v1", "icons": { @@ -2625,7 +2607,7 @@ "id": "speech:v1beta1", "name": "speech", "version": "v1beta1", - "title": "Google Cloud Speech API", + "title": "Cloud Speech API", "description": "Converts audio to text by applying powerful neural network models.", "discoveryRestUrl": "https://speech.googleapis.com/$discovery/rest?version=v1beta1", "icons": { @@ -2961,7 +2943,7 @@ "id": "vision:v1p1beta1", "name": "vision", "version": "v1p1beta1", - "title": "Google Cloud Vision API", + "title": "Cloud Vision API", "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", "discoveryRestUrl": "https://vision.googleapis.com/$discovery/rest?version=v1p1beta1", "icons": { @@ -2976,7 +2958,7 @@ "id": "vision:v1p2beta1", "name": "vision", "version": "v1p2beta1", - "title": "Google Cloud Vision API", + "title": "Cloud Vision API", "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", "discoveryRestUrl": "https://vision.googleapis.com/$discovery/rest?version=v1p2beta1", "icons": { @@ -2991,7 +2973,7 @@ "id": "vision:v1", "name": "vision", "version": "v1", - "title": "Google Cloud Vision API", + "title": "Cloud Vision API", "description": "Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications.", "discoveryRestUrl": "https://vision.googleapis.com/$discovery/rest?version=v1", "icons": { diff --git a/vendor/google.golang.org/api/appengine/v1/appengine-api.json b/vendor/google.golang.org/api/appengine/v1/appengine-api.json index 5bdd77487..385b564d9 100644 --- 
a/vendor/google.golang.org/api/appengine/v1/appengine-api.json +++ b/vendor/google.golang.org/api/appengine/v1/appengine-api.json @@ -1583,7 +1583,7 @@ } } }, - "revision": "20180222", + "revision": "20180307", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -1708,7 +1708,7 @@ "type": "string" }, "locationId": { - "description": "Location from which this application runs. Application instances run out of the data centers in the specified location, which is also where all of the application's end user content is stored.Defaults to us-central1.View the list of supported locations (https://cloud.google.com/appengine/docs/locations).", + "description": "Location from which this application runs. Application instances run out of the data centers in the specified location, which is also where all of the application's end user content is stored.Defaults to us-central.View the list of supported locations (https://cloud.google.com/appengine/docs/locations).", "type": "string" }, "name": { @@ -2548,6 +2548,10 @@ "description": "A resource that represents Google Cloud Platform location.", "id": "Location", "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/appengine/v1/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1/appengine-gen.go index 78c585e9c..e0b15faf1 100644 --- a/vendor/google.golang.org/api/appengine/v1/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1/appengine-gen.go @@ -369,7 +369,7 @@ type Application struct { // LocationId: Location from which this application runs. 
Application // instances run out of the data centers in the specified location, // which is also where all of the application's end user content is - // stored.Defaults to us-central1.View the list of supported locations + // stored.Defaults to us-central.View the list of supported locations // (https://cloud.google.com/appengine/docs/locations). LocationId string `json:"locationId,omitempty"` @@ -1910,6 +1910,10 @@ func (s *LivenessCheck) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For // example // {"cloud.googleapis.com/region": "us-east1"} @@ -1933,7 +1937,7 @@ type Location struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1941,10 +1945,10 @@ type Location struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` diff --git a/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json b/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json index aead2f2ee..1404c0dae 100644 --- a/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json +++ b/vendor/google.golang.org/api/appengine/v1alpha/appengine-api.json @@ -705,7 +705,7 @@ } } }, - "revision": "20180222", + "revision": "20180307", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "AuthorizedCertificate": { @@ -937,6 +937,10 @@ "description": "A resource that represents Google Cloud Platform location.", "id": "Location", "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go index df6585bfa..fb9a243f7 100644 --- a/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1alpha/appengine-gen.go @@ -624,6 +624,10 @@ func (s *ListOperationsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For // example // {"cloud.googleapis.com/region": "us-east1"} @@ -647,7 +651,7 @@ type Location struct { // server. 
googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -655,10 +659,10 @@ type Location struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` diff --git a/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json b/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json index 4c0ada888..4394d65c1 100644 --- a/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json +++ b/vendor/google.golang.org/api/appengine/v1beta/appengine-api.json @@ -1583,7 +1583,7 @@ } } }, - "revision": "20180222", + "revision": "20180307", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -1708,7 +1708,7 @@ "type": "string" }, "locationId": { - "description": "Location from which this application runs. 
Application instances run out of the data centers in the specified location, which is also where all of the application's end user content is stored.Defaults to us-central1.View the list of supported locations (https://cloud.google.com/appengine/docs/locations).", + "description": "Location from which this application runs. Application instances run out of the data centers in the specified location, which is also where all of the application's end user content is stored.Defaults to us-central.View the list of supported locations (https://cloud.google.com/appengine/docs/locations).", "type": "string" }, "name": { @@ -2627,6 +2627,10 @@ "description": "A resource that represents Google Cloud Platform location.", "id": "Location", "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go index f197d871a..2cf2ef669 100644 --- a/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1beta/appengine-gen.go @@ -369,7 +369,7 @@ type Application struct { // LocationId: Location from which this application runs. Application // instances run out of the data centers in the specified location, // which is also where all of the application's end user content is - // stored.Defaults to us-central1.View the list of supported locations + // stored.Defaults to us-central.View the list of supported locations // (https://cloud.google.com/appengine/docs/locations). LocationId string `json:"locationId,omitempty"` @@ -2072,6 +2072,10 @@ func (s *LivenessCheck) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. 
type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For // example // {"cloud.googleapis.com/region": "us-east1"} @@ -2095,7 +2099,7 @@ type Location struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2103,10 +2107,10 @@ type Location struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` diff --git a/vendor/google.golang.org/api/appengine/v1beta4/appengine-api.json b/vendor/google.golang.org/api/appengine/v1beta4/appengine-api.json index c7faa0b72..48a2ff942 100644 --- a/vendor/google.golang.org/api/appengine/v1beta4/appengine-api.json +++ b/vendor/google.golang.org/api/appengine/v1beta4/appengine-api.json @@ -938,7 +938,7 @@ } } }, - "revision": "20180222", + "revision": "20180307", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -1584,6 +1584,10 @@ "description": "A resource that represents Google Cloud Platform location.", "id": "Location", "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go index 9e319ddf7..a15ca6670 100644 --- a/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1beta4/appengine-gen.go @@ -1257,6 +1257,10 @@ func (s *ListVersionsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For // example // {"cloud.googleapis.com/region": "us-east1"} @@ -1280,7 +1284,7 @@ type Location struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1288,10 +1292,10 @@ type Location struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` diff --git a/vendor/google.golang.org/api/appengine/v1beta5/appengine-api.json b/vendor/google.golang.org/api/appengine/v1beta5/appengine-api.json index 387c7e315..91c8ae6c3 100644 --- a/vendor/google.golang.org/api/appengine/v1beta5/appengine-api.json +++ b/vendor/google.golang.org/api/appengine/v1beta5/appengine-api.json @@ -938,7 +938,7 @@ } } }, - "revision": "20180222", + "revision": "20180307", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { @@ -1584,6 +1584,10 @@ "description": "A resource that represents Google Cloud Platform location.", "id": "Location", "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name. 
For example, \"Tokyo\".", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go b/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go index 23bbb1f1a..73db1628a 100644 --- a/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go +++ b/vendor/google.golang.org/api/appengine/v1beta5/appengine-gen.go @@ -1257,6 +1257,10 @@ func (s *ListVersionsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For // example // {"cloud.googleapis.com/region": "us-east1"} @@ -1280,7 +1284,7 @@ type Location struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -1288,10 +1292,10 @@ type Location struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json index 817754293..9a8bd8431 100644 --- a/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-api.json @@ -32,7 +32,7 @@ "description": "A data platform for customers to create, manage, share and query data.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/bigquery/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/635jJ7Ek3ovMh_Mrru9RCl0fBYw\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/77sYFu0di9EzzwuBGmUFES4JWbE\"", "icons": { "x16": "https://www.google.com/images/icons/product/search-16.gif", "x32": "https://www.google.com/images/icons/product/search-32.gif" @@ -998,7 +998,7 @@ } } }, - "revision": "20180223", + "revision": "20180311", "rootUrl": "https://www.googleapis.com/", "schemas": { "BigtableColumn": { @@ -2232,6 +2232,11 @@ "format": "int64", "type": "string" }, + "totalPartitionsProcessed": { + "description": "[Output-only] Total number of partitions processed from all partitioned tables referenced in the job.", + "format": "int64", + "type": "string" + }, "totalSlotMs": { "description": "[Output-only] Slot-milliseconds for the job.", "format": "int64", diff --git a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go index 5ebca4b10..a129d81be 100644 --- a/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go +++ b/vendor/google.golang.org/api/bigquery/v2/bigquery-gen.go @@ -2068,6 +2068,10 @@ type JobStatistics2 struct { // TotalBytesProcessed: [Output-only] Total bytes processed for the job. 
TotalBytesProcessed int64 `json:"totalBytesProcessed,omitempty,string"` + // TotalPartitionsProcessed: [Output-only] Total number of partitions + // processed from all partitioned tables referenced in the job. + TotalPartitionsProcessed int64 `json:"totalPartitionsProcessed,omitempty,string"` + // TotalSlotMs: [Output-only] Slot-milliseconds for the job. TotalSlotMs int64 `json:"totalSlotMs,omitempty,string"` diff --git a/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-api.json b/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-api.json index 09ae5a2a9..0741a3492 100644 --- a/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-api.json +++ b/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-api.json @@ -225,7 +225,7 @@ "locations": { "methods": { "get": { - "description": "Get information about a location.", + "description": "Gets information about a location.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", "httpMethod": "GET", "id": "bigquerydatatransfer.projects.locations.get", @@ -427,7 +427,6 @@ "$ref": "TransferConfig" }, "scopes": [ - "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, @@ -564,7 +563,6 @@ "$ref": "TransferConfig" }, "scopes": [ - "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, @@ -809,7 +807,6 @@ "$ref": "TransferConfig" }, "scopes": [ - "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, @@ -946,7 +943,6 @@ "$ref": "TransferConfig" }, "scopes": [ - "https://www.googleapis.com/auth/bigquery", "https://www.googleapis.com/auth/cloud-platform" ] }, @@ -1160,7 +1156,7 @@ } } }, - "revision": "20180303", + "revision": "20180317", "rootUrl": "https://bigquerydatatransfer.googleapis.com/", "schemas": { "CheckValidCredsRequest": { @@ -1277,7 +1273,7 @@ "type": "boolean" }, "transferType": { - 
"description": "Transfer type. Currently supports only batch transfers,\nwhich are transfers that use the BigQuery batch APIs (load or\nquery) to ingest the data.", + "description": "Deprecated. This field has no effect.", "enum": [ "TRANSFER_TYPE_UNSPECIFIED", "BATCH", diff --git a/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-gen.go b/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-gen.go index cbdffc41f..b28484300 100644 --- a/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-gen.go +++ b/vendor/google.golang.org/api/bigquerydatatransfer/v1/bigquerydatatransfer-gen.go @@ -346,10 +346,7 @@ type DataSource struct { // to different BigQuery targets. SupportsMultipleTransfers bool `json:"supportsMultipleTransfers,omitempty"` - // TransferType: Transfer type. Currently supports only batch - // transfers, - // which are transfers that use the BigQuery batch APIs (load or - // query) to ingest the data. + // TransferType: Deprecated. This field has no effect. // // Possible values: // "TRANSFER_TYPE_UNSPECIFIED" - Invalid or Unknown transfer type @@ -1734,7 +1731,7 @@ type ProjectsLocationsGetCall struct { header_ http.Header } -// Get: Get information about a location. +// Get: Gets information about a location. 
func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1835,7 +1832,7 @@ func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, } return ret, nil // { - // "description": "Get information about a location.", + // "description": "Gets information about a location.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", // "httpMethod": "GET", // "id": "bigquerydatatransfer.projects.locations.get", @@ -2710,7 +2707,6 @@ func (c *ProjectsLocationsTransferConfigsCreateCall) Do(opts ...googleapi.CallOp // "$ref": "TransferConfig" // }, // "scopes": [ - // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -3369,7 +3365,6 @@ func (c *ProjectsLocationsTransferConfigsPatchCall) Do(opts ...googleapi.CallOpt // "$ref": "TransferConfig" // }, // "scopes": [ - // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -4406,7 +4401,6 @@ func (c *ProjectsTransferConfigsCreateCall) Do(opts ...googleapi.CallOption) (*T // "$ref": "TransferConfig" // }, // "scopes": [ - // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } @@ -5065,7 +5059,6 @@ func (c *ProjectsTransferConfigsPatchCall) Do(opts ...googleapi.CallOption) (*Tr // "$ref": "TransferConfig" // }, // "scopes": [ - // "https://www.googleapis.com/auth/bigquery", // "https://www.googleapis.com/auth/cloud-platform" // ] // } diff --git a/vendor/google.golang.org/api/chat/v1/chat-api.json b/vendor/google.golang.org/api/chat/v1/chat-api.json index 3bb04d418..b26b3d4e7 100644 --- a/vendor/google.golang.org/api/chat/v1/chat-api.json +++ b/vendor/google.golang.org/api/chat/v1/chat-api.json @@ -3,7 +3,7 @@ "baseUrl": "https://chat.googleapis.com/", "batchPath": "batch", "canonicalName": "Hangouts Chat", - 
"description": "", + "description": "Create bots and extend the new Hangouts Chat.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/hangouts/chat", "fullyEncodeReservedExpansion": true, @@ -479,7 +479,7 @@ } } }, - "revision": "20180301", + "revision": "20180321", "rootUrl": "https://chat.googleapis.com/", "schemas": { "ActionParameter": { diff --git a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json index de55ec516..b3bf8532b 100644 --- a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json +++ b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-api.json @@ -251,7 +251,7 @@ ] }, "create": { - "description": "Starts a build with the specified configuration.\n\nThis method returns a long-running `Operation`, which includes the build ID.\nPass the build ID to `GetBuild` to determine the build status\n(such as `SUCCESS` or `FAILURE`).", + "description": "Starts a build with the specified configuration.\n\nThis method returns a long-running `Operation`, which includes the build\nID. Pass the build ID to `GetBuild` to determine the build status (such as\n`SUCCESS` or `FAILURE`).", "flatPath": "v1/projects/{projectId}/builds", "httpMethod": "POST", "id": "cloudbuild.projects.builds.create", @@ -572,7 +572,7 @@ } } }, - "revision": "20180301", + "revision": "20180315", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "Build": { @@ -880,7 +880,7 @@ "type": "array" }, "waitFor": { - "description": "The ID(s) of the step(s) that this build step depends on.\nThis build step will not start until all the build steps in `wait_for`\nhave completed successfully. 
If `wait_for` is empty, this build step will\nstart when all previous build steps in the `Build.Steps` list have completed\nsuccessfully.", + "description": "The ID(s) of the step(s) that this build step depends on.\nThis build step will not start until all the build steps in `wait_for`\nhave completed successfully. If `wait_for` is empty, this build step will\nstart when all previous build steps in the `Build.Steps` list have\ncompleted successfully.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go index fd2798d81..06366ca44 100644 --- a/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go +++ b/vendor/google.golang.org/api/cloudbuild/v1/cloudbuild-gen.go @@ -551,9 +551,9 @@ type BuildStep struct { // `wait_for` // have completed successfully. If `wait_for` is empty, this build step // will - // start when all previous build steps in the `Build.Steps` list have - // completed - // successfully. + // start when all previous build steps in the `Build.Steps` list + // have + // completed successfully. WaitFor []string `json:"waitFor,omitempty"` // ForceSendFields is a list of field names (e.g. "Args") to @@ -2085,9 +2085,10 @@ type ProjectsBuildsCreateCall struct { // Create: Starts a build with the specified configuration. // // This method returns a long-running `Operation`, which includes the -// build ID. -// Pass the build ID to `GetBuild` to determine the build status -// (such as `SUCCESS` or `FAILURE`). +// build +// ID. Pass the build ID to `GetBuild` to determine the build status +// (such as +// `SUCCESS` or `FAILURE`). 
func (r *ProjectsBuildsService) Create(projectId string, build *Build) *ProjectsBuildsCreateCall { c := &ProjectsBuildsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId @@ -2181,7 +2182,7 @@ func (c *ProjectsBuildsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Starts a build with the specified configuration.\n\nThis method returns a long-running `Operation`, which includes the build ID.\nPass the build ID to `GetBuild` to determine the build status\n(such as `SUCCESS` or `FAILURE`).", + // "description": "Starts a build with the specified configuration.\n\nThis method returns a long-running `Operation`, which includes the build\nID. Pass the build ID to `GetBuild` to determine the build status (such as\n`SUCCESS` or `FAILURE`).", // "flatPath": "v1/projects/{projectId}/builds", // "httpMethod": "POST", // "id": "cloudbuild.projects.builds.create", diff --git a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json index 126395e9f..97c1757f5 100644 --- a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json +++ b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-api.json @@ -557,7 +557,7 @@ "type": "string" }, "updateMask": { - "description": "Only updates the `device` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `credentials`, `enabled_state`, and `metadata`", + "description": "Only updates the `device` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `credentials`, `blocked`, and `metadata`", "format": "google-fieldmask", "location": "query", "type": "string" @@ -658,7 +658,7 @@ } } }, - "revision": "20180227", + "revision": "20180314", "rootUrl": 
"https://cloudiot.googleapis.com/", "schemas": { "Binding": { diff --git a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go index 148c59bc3..51dcf6d06 100644 --- a/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go +++ b/vendor/google.golang.org/api/cloudiot/v1/cloudiot-gen.go @@ -3475,8 +3475,7 @@ func (r *ProjectsLocationsRegistriesDevicesService) Patch(name string, device *D // The field mask must not be empty, and it must not contain fields // that // are immutable or only set by the server. -// Mutable top-level fields: `credentials`, `enabled_state`, and -// `metadata` +// Mutable top-level fields: `credentials`, `blocked`, and `metadata` func (c *ProjectsLocationsRegistriesDevicesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsRegistriesDevicesPatchCall { c.urlParams_.Set("updateMask", updateMask) return c @@ -3584,7 +3583,7 @@ func (c *ProjectsLocationsRegistriesDevicesPatchCall) Do(opts ...googleapi.CallO // "type": "string" // }, // "updateMask": { - // "description": "Only updates the `device` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `credentials`, `enabled_state`, and `metadata`", + // "description": "Only updates the `device` fields indicated by this mask.\nThe field mask must not be empty, and it must not contain fields that\nare immutable or only set by the server.\nMutable top-level fields: `credentials`, `blocked`, and `metadata`", // "format": "google-fieldmask", // "location": "query", // "type": "string" diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json index e8b61eca8..93885058e 100644 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json +++ b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-api.json @@ -120,7 +120,7 @@ "locations": { 
"methods": { "get": { - "description": "Get information about a location.", + "description": "Gets information about a location.", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", "httpMethod": "GET", "id": "cloudkms.projects.locations.get", @@ -855,7 +855,7 @@ } } }, - "revision": "20180303", + "revision": "20180316", "rootUrl": "https://cloudkms.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go index 478612b93..7b7d769ed 100644 --- a/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go +++ b/vendor/google.golang.org/api/cloudkms/v1/cloudkms-gen.go @@ -1186,7 +1186,7 @@ type ProjectsLocationsGetCall struct { header_ http.Header } -// Get: Get information about a location. +// Get: Gets information about a location. func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall { c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name @@ -1287,7 +1287,7 @@ func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, } return ret, nil // { - // "description": "Get information about a location.", + // "description": "Gets information about a location.", // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}", // "httpMethod": "GET", // "id": "cloudkms.projects.locations.get", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index 264347ef3..0187a314b 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -12,7 +12,7 @@ } }, "basePath": "", - "baseUrl": "https://content-cloudresourcemanager.googleapis.com/", + "baseUrl": "https://cloudresourcemanager.googleapis.com/", "batchPath": 
"batch", "canonicalName": "Cloud Resource Manager", "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", @@ -762,7 +762,7 @@ ] }, "delete": { - "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", "flatPath": "v1/projects/{projectId}", "httpMethod": "DELETE", "id": "cloudresourcemanager.projects.delete", @@ -1155,8 
+1155,8 @@ } } }, - "revision": "20180305", - "rootUrl": "https://content-cloudresourcemanager.googleapis.com/", + "revision": "20180315", + "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { "description": "Identifying information for a single ancestor of a project.", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 2b358b1e1..f0c3b8d41 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "cloudresourcemanager:v1" const apiName = "cloudresourcemanager" const apiVersion = "v1" -const basePath = "https://content-cloudresourcemanager.googleapis.com/" +const basePath = "https://cloudresourcemanager.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -5699,11 +5699,8 @@ type ProjectsDeleteCall struct { // Delete: Marks the Project identified by the specified // `project_id` (for example, `my-project-123`) for deletion. -// This method will only affect the Project if the following criteria -// are met: -// -// + The Project does not have a billing account associated with it. -// + The Project has a lifecycle state of +// This method will only affect the Project if it has a lifecycle state +// of // ACTIVE. 
// // This method changes the Project's lifecycle state from @@ -5810,7 +5807,7 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } return ret, nil // { - // "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + // "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time,\nat which point the Project is no longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", // "flatPath": "v1/projects/{projectId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.projects.delete", diff --git 
a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json index c07244d46..97ee2bb60 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-api.json @@ -12,7 +12,7 @@ } }, "basePath": "", - "baseUrl": "https://content-cloudresourcemanager.googleapis.com/", + "baseUrl": "https://cloudresourcemanager.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloud Resource Manager", "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", @@ -327,7 +327,7 @@ ] }, "delete": { - "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion 
starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", "flatPath": "v1beta1/projects/{projectId}", "httpMethod": "DELETE", "id": "cloudresourcemanager.projects.delete", @@ -576,8 +576,8 @@ } } }, - "revision": "20180305", - "rootUrl": "https://content-cloudresourcemanager.googleapis.com/", + "revision": "20180315", + "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { "description": "Identifying information for a single ancestor of a project.", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go index 40489a4e3..b7ce4a410 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1beta1/cloudresourcemanager-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "cloudresourcemanager:v1beta1" const apiName = "cloudresourcemanager" const apiVersion = "v1beta1" -const basePath = "https://content-cloudresourcemanager.googleapis.com/" +const basePath = "https://cloudresourcemanager.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -2220,11 +2220,8 @@ type ProjectsDeleteCall struct { // Delete: Marks the Project identified by the specified // `project_id` (for example, `my-project-123`) for deletion. -// This method will only affect the Project if the following criteria -// are met: -// -// + The Project does not have a billing account associated with it. 
-// + The Project has a lifecycle state of +// This method will only affect the Project if it has a lifecycle state +// of // ACTIVE. // // This method changes the Project's lifecycle state from @@ -2332,7 +2329,7 @@ func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { } return ret, nil // { - // "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if the following criteria are met:\n\n+ The Project does not have a billing account associated with it.\n+ The Project has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", + // "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", // 
"flatPath": "v1beta1/projects/{projectId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.projects.delete", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-api.json new file mode 100644 index 000000000..1fe2d03a1 --- /dev/null +++ b/vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-api.json @@ -0,0 +1,848 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + } + } + } + }, + "basePath": "", + "baseUrl": "https://cloudresourcemanager.googleapis.com/", + "batchPath": "batch", + "canonicalName": "Cloud Resource Manager", + "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/resource-manager", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "cloudresourcemanager:v2", + "kind": "discovery#restDescription", + "name": "cloudresourcemanager", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + 
"enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "pp": { + "default": "true", + "description": "Pretty-print response.", + "location": "query", + "type": "boolean" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "folders": { + "methods": { + "create": { + "description": "Creates a Folder in the resource hierarchy.\nReturns an Operation which can be used to track the progress of the\nfolder creation workflow.\nUpon success the Operation.response field will be populated with the\ncreated Folder.\n\nIn order to succeed, the addition of this new Folder must not violate\nthe Folder naming, height or fanout constraints.\n\n+ The Folder's display_name must be distinct from all other Folder's that\nshare its parent.\n+ The addition of the Folder must not cause the active Folder hierarchy\nto exceed a height of 4. Note, the full active + deleted Folder hierarchy\nis allowed to reach a height of 8; this provides additional headroom when\nmoving folders that contain deleted folders.\n+ The addition of the Folder must not cause the total number of Folders\nunder its parent to exceed 100.\n\nIf the operation fails due to a folder constraint violation,\na PreconditionFailure explaining the violation will be returned.\nIf the failure occurs synchronously then the PreconditionFailure\nwill be returned via the Status.details field and if it occurs\nasynchronously then the PreconditionFailure will be returned\nvia the the Operation.error field.\n\nThe caller must have `resourcemanager.folders.create` permission on the\nidentified parent.", + "flatPath": "v2/folders", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.create", + "parameterOrder": [], + "parameters": { + "parent": { + "description": "The resource name of the new Folder's parent.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + "location": "query", + "type": "string" + } + }, + "path": "v2/folders", + "request": { + "$ref": "Folder" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + 
"description": "Requests deletion of a Folder. The Folder is moved into the\nDELETE_REQUESTED state\nimmediately, and is deleted approximately 30 days later. This method may\nonly be called on an empty Folder in the\nACTIVE state, where a Folder is empty if\nit doesn't contain any Folders or Projects in the\nACTIVE state.\nThe caller must have `resourcemanager.folders.delete` permission on the\nidentified folder.", + "flatPath": "v2/folders/{foldersId}", + "httpMethod": "DELETE", + "id": "cloudresourcemanager.folders.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "the resource name of the Folder to be deleted.\nMust be of the form `folders/{folder_id}`.", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Folder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves a Folder identified by the supplied resource name.\nValid Folder resource names have the format `folders/{folder_id}`\n(for example, `folders/1234`).\nThe caller must have `resourcemanager.folders.get` permission on the\nidentified folder.", + "flatPath": "v2/folders/{foldersId}", + "httpMethod": "GET", + "id": "cloudresourcemanager.folders.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the Folder to retrieve.\nMust be of the form `folders/{folder_id}`.", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "Folder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "getIamPolicy": { + "description": "Gets the access control policy for a Folder. The returned policy may be\nempty if no such policy or resource exists. 
The `resource` field should\nbe the Folder's resource name, e.g. \"folders/1234\".\nThe caller must have `resourcemanager.folders.getIamPolicy` permission\non the identified folder.", + "flatPath": "v2/folders/{foldersId}:getIamPolicy", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.getIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+resource}:getIamPolicy", + "request": { + "$ref": "GetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "list": { + "description": "Lists the Folders that are direct descendants of supplied parent resource.\nList provides a strongly consistent view of the Folders underneath\nthe specified parent resource.\nList returns Folders sorted based upon the (ascending) lexical ordering\nof their display_name.\nThe caller must have `resourcemanager.folders.list` permission on the\nidentified parent.", + "flatPath": "v2/folders", + "httpMethod": "GET", + "id": "cloudresourcemanager.folders.list", + "parameterOrder": [], + "parameters": { + "pageSize": { + "description": "The maximum number of Folders to return in the response.\nThis field is optional.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "A pagination token returned from a previous call to `ListFolders`\nthat indicates where this listing should continue from.\nThis field is optional.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The resource name of the Organization or Folder whose Folders are\nbeing listed.\nMust be of the 
form `folders/{folder_id}` or `organizations/{org_id}`.\nAccess to this method is controlled by checking the\n`resourcemanager.folders.list` permission on the `parent`.", + "location": "query", + "type": "string" + }, + "showDeleted": { + "description": "Controls whether Folders in the\nDELETE_REQUESTED\nstate should be returned. Defaults to false. This field is optional.", + "location": "query", + "type": "boolean" + } + }, + "path": "v2/folders", + "response": { + "$ref": "ListFoldersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "move": { + "description": "Moves a Folder under a new resource parent.\nReturns an Operation which can be used to track the progress of the\nfolder move workflow.\nUpon success the Operation.response field will be populated with the\nmoved Folder.\nUpon failure, a FolderOperationError categorizing the failure cause will\nbe returned - if the failure occurs synchronously then the\nFolderOperationError will be returned via the Status.details field\nand if it occurs asynchronously then the FolderOperation will be returned\nvia the the Operation.error field.\nIn addition, the Operation.metadata field will be populated with a\nFolderOperation message as an aid to stateless clients.\nFolder moves will be rejected if they violate either the naming, height\nor fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.move` permission on the\nfolder's current and proposed new parent.", + "flatPath": "v2/folders/{foldersId}:move", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.move", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the Folder to move.\nMust be of the form folders/{folder_id}", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": 
"v2/{+name}:move", + "request": { + "$ref": "MoveFolderRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a Folder, changing its display_name.\nChanges to the folder display_name will be rejected if they violate either\nthe display_name formatting rules or naming constraints described in\nthe CreateFolder documentation.\n\nThe Folder's display name must start and end with a letter or digit,\nmay contain letters, digits, spaces, hyphens and underscores and can be\nno longer than 30 characters. This is captured by the regular expression:\n[\\p{L}\\p{N}]({\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.\nThe caller must have `resourcemanager.folders.update` permission on the\nidentified folder.\n\nIf the update fails due to the unique name constraint then a\nPreconditionFailure explaining this violation will be returned\nin the Status.details field.", + "flatPath": "v2/folders/{foldersId}", + "httpMethod": "PATCH", + "id": "cloudresourcemanager.folders.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Output only. 
The resource name of the Folder.\nIts format is `folders/{folder_id}`, for example: \"folders/1234\".", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Fields to be updated.\nOnly the `display_name` can be updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "Folder" + }, + "response": { + "$ref": "Folder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "search": { + "description": "Search for folders that match specific filter criteria.\nSearch provides an eventually consistent view of the folders a user has\naccess to which meet the specified filter criteria.\n\nThis will only return folders on which the caller has the\npermission `resourcemanager.folders.get`.", + "flatPath": "v2/folders:search", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.search", + "parameterOrder": [], + "parameters": {}, + "path": "v2/folders:search", + "request": { + "$ref": "SearchFoldersRequest" + }, + "response": { + "$ref": "SearchFoldersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only" + ] + }, + "setIamPolicy": { + "description": "Sets the access control policy on a Folder, replacing any existing policy.\nThe `resource` field should be the Folder's resource name, e.g.\n\"folders/1234\".\nThe caller must have `resourcemanager.folders.setIamPolicy` permission\non the identified folder.", + "flatPath": "v2/folders/{foldersId}:setIamPolicy", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.setIamPolicy", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + "location": "path", + 
"pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+resource}:setIamPolicy", + "request": { + "$ref": "SetIamPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified Folder.\nThe `resource` field should be the Folder's resource name,\ne.g. \"folders/1234\".\n\nThere are no permissions required for making this API call.", + "flatPath": "v2/folders/{foldersId}:testIamPermissions", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.testIamPermissions", + "parameterOrder": [ + "resource" + ], + "parameters": { + "resource": { + "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+resource}:testIamPermissions", + "request": { + "$ref": "TestIamPermissionsRequest" + }, + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "undelete": { + "description": "Cancels the deletion request for a Folder. 
This method may only be\ncalled on a Folder in the\nDELETE_REQUESTED state.\nIn order to succeed, the Folder's parent must be in the\nACTIVE state.\nIn addition, reintroducing the folder into the tree must not violate\nfolder naming, height and fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.undelete` permission on the\nidentified folder.", + "flatPath": "v2/folders/{foldersId}:undelete", + "httpMethod": "POST", + "id": "cloudresourcemanager.folders.undelete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name of the Folder to undelete.\nMust be of the form `folders/{folder_id}`.", + "location": "path", + "pattern": "^folders/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:undelete", + "request": { + "$ref": "UndeleteFolderRequest" + }, + "response": { + "$ref": "Folder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + }, + "revision": "20180315", + "rootUrl": "https://cloudresourcemanager.googleapis.com/", + "schemas": { + "AuditConfig": { + "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": 
\"fooservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": [\n \"user:bar@gmail.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts foo@gmail.com from DATA_READ logging, and\nbar@gmail.com from DATA_WRITE logging.", + "id": "AuditConfig", + "properties": { + "auditLogConfigs": { + "description": "The configuration for logging of each type of permission.\nNext ID: 4", + "items": { + "$ref": "AuditLogConfig" + }, + "type": "array" + }, + "service": { + "description": "Specifies a service that will be enabled for audit logging.\nFor example, `storage.googleapis.com`, `cloudsql.googleapis.com`.\n`allServices` is a special value that covers all services.", + "type": "string" + } + }, + "type": "object" + }, + "AuditLogConfig": { + "description": "Provides the configuration for logging a type of permissions.\nExample:\n\n {\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n }\n ]\n }\n\nThis enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting\nfoo@gmail.com from DATA_READ logging.", + "id": "AuditLogConfig", + "properties": { + "exemptedMembers": { + "description": "Specifies the identities that do not cause logging for this type of\npermission.\nFollows the same format of Binding.members.", + "items": { + "type": "string" + }, + "type": "array" + }, + "logType": { + "description": "The log type that this config enables.", + "enum": [ + "LOG_TYPE_UNSPECIFIED", + "ADMIN_READ", + "DATA_WRITE", + "DATA_READ" + ], + "enumDescriptions": [ + "Default case. Should never be this.", + "Admin reads. Example: CloudIAM getIamPolicy", + "Data writes. Example: CloudSQL Users create", + "Data reads. 
Example: CloudSQL Users list" + ], + "type": "string" + } + }, + "type": "object" + }, + "Binding": { + "description": "Associates `members` with a `role`.", + "id": "Binding", + "properties": { + "members": { + "description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` or `joe@example.com`.\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: A Google Apps domain name that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "description": "Role that is assigned to `members`.\nFor example, `roles/viewer`, `roles/editor`, or `roles/owner`.\nRequired", + "type": "string" + } + }, + "type": "object" + }, + "Folder": { + "description": "A Folder in an Organization's resource hierarchy, used to\norganize that Organization's resources.", + "id": "Folder", + "properties": { + "createTime": { + "description": "Output only. Timestamp when the Folder was created. 
Assigned by the server.", + "format": "google-datetime", + "type": "string" + }, + "displayName": { + "description": "The folder’s display name.\nA folder’s display name must be unique amongst its siblings, e.g.\nno two folders with the same parent can share the same display name.\nThe display name must start and end with a letter or digit, may contain\nletters, digits, spaces, hyphens and underscores and can be no longer\nthan 30 characters. This is captured by the regular expression:\n[\\p{L}\\p{N}]({\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.", + "type": "string" + }, + "lifecycleState": { + "description": "Output only. The lifecycle state of the folder.\nUpdates to the lifecycle_state must be performed via\nDeleteFolder and\nUndeleteFolder.", + "enum": [ + "LIFECYCLE_STATE_UNSPECIFIED", + "ACTIVE", + "DELETE_REQUESTED" + ], + "enumDescriptions": [ + "Unspecified state.", + "The normal and active state.", + "The folder has been marked for deletion by the user." + ], + "type": "string" + }, + "name": { + "description": "Output only. 
The resource name of the Folder.\nIts format is `folders/{folder_id}`, for example: \"folders/1234\".", + "type": "string" + }, + "parent": { + "description": "The Folder’s parent's resource name.\nUpdates to the folder's parent must be performed via\nMoveFolder.", + "type": "string" + } + }, + "type": "object" + }, + "FolderOperation": { + "description": "Metadata describing a long running folder operation", + "id": "FolderOperation", + "properties": { + "destinationParent": { + "description": "The resource name of the folder or organization we are either creating\nthe folder under or moving the folder to.", + "type": "string" + }, + "displayName": { + "description": "The display name of the folder.", + "type": "string" + }, + "operationType": { + "description": "The type of this operation.", + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "CREATE", + "MOVE" + ], + "enumDescriptions": [ + "Operation type not specified.", + "A create folder operation.", + "A move folder operation." + ], + "type": "string" + }, + "sourceParent": { + "description": "The resource name of the folder's parent.\nOnly applicable when the operation_type is MOVE.", + "type": "string" + } + }, + "type": "object" + }, + "FolderOperationError": { + "description": "A classification of the Folder Operation error.", + "id": "FolderOperationError", + "properties": { + "errorMessageId": { + "description": "The type of operation error experienced.", + "enum": [ + "ERROR_TYPE_UNSPECIFIED", + "ACTIVE_FOLDER_HEIGHT_VIOLATION", + "MAX_CHILD_FOLDERS_VIOLATION", + "FOLDER_NAME_UNIQUENESS_VIOLATION", + "RESOURCE_DELETED_VIOLATION", + "PARENT_DELETED_VIOLATION", + "CYCLE_INTRODUCED_VIOLATION", + "FOLDER_BEING_MOVED_VIOLATION", + "FOLDER_TO_DELETE_NON_EMPTY_VIOLATION", + "DELETED_FOLDER_HEIGHT_VIOLATION" + ], + "enumDescriptions": [ + "The error type was unrecognized or unspecified.", + "The attempted action would violate the max folder depth constraint.", + "The attempted action would violate the max child 
folders constraint.", + "The attempted action would violate the locally-unique folder\ndisplay_name constraint.", + "The resource being moved has been deleted.", + "The resource a folder was being added to has been deleted.", + "The attempted action would introduce cycle in resource path.", + "The attempted action would move a folder that is already being moved.", + "The folder the caller is trying to delete contains active resources.", + "The attempted action would violate the max deleted folder depth\nconstraint." + ], + "type": "string" + } + }, + "type": "object" + }, + "GetIamPolicyRequest": { + "description": "Request message for `GetIamPolicy` method.", + "id": "GetIamPolicyRequest", + "properties": {}, + "type": "object" + }, + "ListFoldersResponse": { + "description": "The ListFolders response message.", + "id": "ListFoldersResponse", + "properties": { + "folders": { + "description": "A possibly paginated list of Folders that are direct descendants of\nthe specified parent resource.", + "items": { + "$ref": "Folder" + }, + "type": "array" + }, + "nextPageToken": { + "description": "A pagination token returned from a previous call to `ListFolders`\nthat indicates from where listing should continue.\nThis field is optional.", + "type": "string" + } + }, + "type": "object" + }, + "MoveFolderRequest": { + "description": "The MoveFolder request message.", + "id": "MoveFolderRequest", + "properties": { + "destinationParent": { + "description": "The resource name of the Folder or Organization to reparent\nthe folder under.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + "type": "string" + } + }, + "type": "object" + }, + "Operation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "id": "Operation", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either 
`error` or `response` is\navailable.", + "type": "boolean" + }, + "error": { + "$ref": "Status", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object" + } + }, + "type": "object" + }, + "Policy": { + "description": "Defines an Identity and Access Management (IAM) policy. It is used to\nspecify access control policies for Cloud Platform resources.\n\n\nA `Policy` consists of a list of `bindings`. 
A `Binding` binds a list of\n`members` to a `role`, where the members can be user accounts, Google groups,\nGoogle domains, and service accounts. A `role` is a named list of permissions\ndefined by IAM.\n\n**Example**\n\n {\n \"bindings\": [\n {\n \"role\": \"roles/owner\",\n \"members\": [\n \"user:mike@example.com\",\n \"group:admins@example.com\",\n \"domain:google.com\",\n \"serviceAccount:my-other-app@appspot.gserviceaccount.com\",\n ]\n },\n {\n \"role\": \"roles/viewer\",\n \"members\": [\"user:sean@example.com\"]\n }\n ]\n }\n\nFor a description of IAM and its features, see the\n[IAM developer's guide](https://cloud.google.com/iam/docs).", + "id": "Policy", + "properties": { + "auditConfigs": { + "description": "Specifies cloud audit logging configuration for this policy.", + "items": { + "$ref": "AuditConfig" + }, + "type": "array" + }, + "bindings": { + "description": "Associates a list of `members` to a `role`.\n`bindings` with no members will result in an error.", + "items": { + "$ref": "Binding" + }, + "type": "array" + }, + "etag": { + "description": "`etag` is used for optimistic concurrency control as a way to help\nprevent simultaneous updates of a policy from overwriting each other.\nIt is strongly suggested that systems make use of the `etag` in the\nread-modify-write cycle to perform policy updates in order to avoid race\nconditions: An `etag` is returned in the response to `getIamPolicy`, and\nsystems are expected to put that etag in the request to `setIamPolicy` to\nensure that their change will be applied to the same version of the policy.\n\nIf no `etag` is provided in the call to `setIamPolicy`, then the existing\npolicy is overwritten blindly.", + "format": "byte", + "type": "string" + }, + "version": { + "description": "Deprecated.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "ProjectCreationStatus": { + "description": "A status object which is used as the `metadata` field for the Operation\nreturned by 
CreateProject. It provides insight for when significant phases of\nProject creation have completed.", + "id": "ProjectCreationStatus", + "properties": { + "createTime": { + "description": "Creation time of the project creation workflow.", + "format": "google-datetime", + "type": "string" + }, + "gettable": { + "description": "True if the project can be retrieved using GetProject. No other operations\non the project are guaranteed to work until the project creation is\ncomplete.", + "type": "boolean" + }, + "ready": { + "description": "True if the project creation process is complete.", + "type": "boolean" + } + }, + "type": "object" + }, + "SearchFoldersRequest": { + "description": "The request message for searching folders.", + "id": "SearchFoldersRequest", + "properties": { + "pageSize": { + "description": "The maximum number of folders to return in the response.\nThis field is optional.", + "format": "int32", + "type": "integer" + }, + "pageToken": { + "description": "A pagination token returned from a previous call to `SearchFolders`\nthat indicates from where search should continue.\nThis field is optional.", + "type": "string" + }, + "query": { + "description": "Search criteria used to select the Folders to return.\nIf no search criteria is specified then all accessible folders will be\nreturned.\n\nQuery expressions can be used to restrict results based upon displayName,\nlifecycleState and parent, where the operators `=`, `NOT`, `AND` and `OR`\ncan be used along with the suffix wildcard symbol `*`.\n\nSome example queries are:\n\n|Query | Description|\n|----- | -----------|\n|displayName=Test* | Folders whose display name starts with \"Test\".\n|lifecycleState=ACTIVE | Folders whose lifecycleState is ACTIVE.\n|parent=folders/123 | Folders whose parent is \"folders/123\".\n|parent=folders/123 AND lifecycleState=ACTIVE | Active folders whose parent is \"folders/123\".|", + "type": "string" + } + }, + "type": "object" + }, + "SearchFoldersResponse": { + 
"description": "The response message for searching folders.", + "id": "SearchFoldersResponse", + "properties": { + "folders": { + "description": "A possibly paginated folder search results.\nthe specified parent resource.", + "items": { + "$ref": "Folder" + }, + "type": "array" + }, + "nextPageToken": { + "description": "A pagination token returned from a previous call to `SearchFolders`\nthat indicates from where searching should continue.\nThis field is optional.", + "type": "string" + } + }, + "type": "object" + }, + "SetIamPolicyRequest": { + "description": "Request message for `SetIamPolicy` method.", + "id": "SetIamPolicyRequest", + "properties": { + "policy": { + "$ref": "Policy", + "description": "REQUIRED: The complete policy to be applied to the `resource`. The size of\nthe policy is limited to a few 10s of KB. An empty policy is a\nvalid policy but certain Cloud Platform services (such as Projects)\nmight reject them." + }, + "updateMask": { + "description": "OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only\nthe fields in the mask will be modified. If no mask is provided, the\nfollowing default mask is used:\npaths: \"bindings, etag\"\nThis field is only used by Cloud IAM.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. 
The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. 
If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, + "TestIamPermissionsRequest": { + "description": "Request message for `TestIamPermissions` method.", + "id": "TestIamPermissionsRequest", + "properties": { + "permissions": { + "description": "The set of permissions to check for the `resource`. Permissions with\nwildcards (such as '*' or 'storage.*') are not allowed. 
For more\ninformation see\n[IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "TestIamPermissionsResponse": { + "description": "Response message for `TestIamPermissions` method.", + "id": "TestIamPermissionsResponse", + "properties": { + "permissions": { + "description": "A subset of `TestPermissionsRequest.permissions` that the caller is\nallowed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "UndeleteFolderRequest": { + "description": "The UndeleteFolder request message.", + "id": "UndeleteFolderRequest", + "properties": {}, + "type": "object" + } + }, + "servicePath": "", + "title": "Cloud Resource Manager API", + "version": "v2" +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-gen.go new file mode 100644 index 000000000..b1434544a --- /dev/null +++ b/vendor/google.golang.org/api/cloudresourcemanager/v2/cloudresourcemanager-gen.go @@ -0,0 +1,2826 @@ +// Package cloudresourcemanager provides access to the Cloud Resource Manager API. +// +// See https://cloud.google.com/resource-manager +// +// Usage example: +// +// import "google.golang.org/api/cloudresourcemanager/v2" +// ... +// cloudresourcemanagerService, err := cloudresourcemanager.New(oauthHttpClient) +package cloudresourcemanager // import "google.golang.org/api/cloudresourcemanager/v2" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. 
+var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "cloudresourcemanager:v2" +const apiName = "cloudresourcemanager" +const apiVersion = "v2" +const basePath = "https://cloudresourcemanager.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Folders = NewFoldersService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Folders *FoldersService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewFoldersService(s *Service) *FoldersService { + rs := &FoldersService{s: s} + return rs +} + +type FoldersService struct { + s *Service +} + +// AuditConfig: Specifies the audit configuration for a service. +// The configuration determines which permission types are logged, and +// what +// identities, if any, are exempted from logging. +// An AuditConfig must have one or more AuditLogConfigs. 
+// +// If there are AuditConfigs for both `allServices` and a specific +// service, +// the union of the two AuditConfigs is used for that service: the +// log_types +// specified in each AuditConfig are enabled, and the exempted_members +// in each +// AuditLogConfig are exempted. +// +// Example Policy with multiple AuditConfigs: +// +// { +// "audit_configs": [ +// { +// "service": "allServices" +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:foo@gmail.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE", +// }, +// { +// "log_type": "ADMIN_READ", +// } +// ] +// }, +// { +// "service": "fooservice.googleapis.com" +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// }, +// { +// "log_type": "DATA_WRITE", +// "exempted_members": [ +// "user:bar@gmail.com" +// ] +// } +// ] +// } +// ] +// } +// +// For fooservice, this policy enables DATA_READ, DATA_WRITE and +// ADMIN_READ +// logging. It also exempts foo@gmail.com from DATA_READ logging, +// and +// bar@gmail.com from DATA_WRITE logging. +type AuditConfig struct { + // AuditLogConfigs: The configuration for logging of each type of + // permission. + // Next ID: 4 + AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` + + // Service: Specifies a service that will be enabled for audit + // logging. + // For example, `storage.googleapis.com`, + // `cloudsql.googleapis.com`. + // `allServices` is a special value that covers all services. + Service string `json:"service,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditLogConfigs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuditConfig) MarshalJSON() ([]byte, error) { + type NoMethod AuditConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AuditLogConfig: Provides the configuration for logging a type of +// permissions. +// Example: +// +// { +// "audit_log_configs": [ +// { +// "log_type": "DATA_READ", +// "exempted_members": [ +// "user:foo@gmail.com" +// ] +// }, +// { +// "log_type": "DATA_WRITE", +// } +// ] +// } +// +// This enables 'DATA_READ' and 'DATA_WRITE' logging, while +// exempting +// foo@gmail.com from DATA_READ logging. +type AuditLogConfig struct { + // ExemptedMembers: Specifies the identities that do not cause logging + // for this type of + // permission. + // Follows the same format of Binding.members. + ExemptedMembers []string `json:"exemptedMembers,omitempty"` + + // LogType: The log type that this config enables. + // + // Possible values: + // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. + // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy + // "DATA_WRITE" - Data writes. Example: CloudSQL Users create + // "DATA_READ" - Data reads. Example: CloudSQL Users list + LogType string `json:"logType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExemptedMembers") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { + type NoMethod AuditLogConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Binding: Associates `members` with a `role`. +type Binding struct { + // Members: Specifies the identities requesting access for a Cloud + // Platform resource. + // `members` can have the following values: + // + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents + // anyone + // who is authenticated with a Google account or a service + // account. + // + // * `user:{emailid}`: An email address that represents a specific + // Google + // account. For example, `alice@gmail.com` or `joe@example.com`. + // + // + // * `serviceAccount:{emailid}`: An email address that represents a + // service + // account. For example, + // `my-other-app@appspot.gserviceaccount.com`. + // + // * `group:{emailid}`: An email address that represents a Google + // group. + // For example, `admins@example.com`. + // + // + // * `domain:{domain}`: A Google Apps domain name that represents all + // the + // users of that domain. 
For example, `google.com` or + // `example.com`. + // + // + Members []string `json:"members,omitempty"` + + // Role: Role that is assigned to `members`. + // For example, `roles/viewer`, `roles/editor`, or + // `roles/owner`. + // Required + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Members") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Members") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Binding) MarshalJSON() ([]byte, error) { + type NoMethod Binding + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Folder: A Folder in an Organization's resource hierarchy, used +// to +// organize that Organization's resources. +type Folder struct { + // CreateTime: Output only. Timestamp when the Folder was created. + // Assigned by the server. + CreateTime string `json:"createTime,omitempty"` + + // DisplayName: The folder’s display name. + // A folder’s display name must be unique amongst its siblings, + // e.g. + // no two folders with the same parent can share the same display + // name. 
+
+	// The display name must start and end with a letter or digit, may
+	// contain
+	// letters, digits, spaces, hyphens and underscores and can be no
+	// longer
+	// than 30 characters. This is captured by the regular
+	// expression:
+	// [\p{L}\p{N}]([\p{L}\p{N}_- ]{0,28}[\p{L}\p{N}])?.
+	DisplayName string `json:"displayName,omitempty"`
+
+	// LifecycleState: Output only. The lifecycle state of the
+	// folder.
+	// Updates to the lifecycle_state must be performed via
+	// DeleteFolder and
+	// UndeleteFolder.
+	//
+	// Possible values:
+	//   "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state.
+	//   "ACTIVE" - The normal and active state.
+	//   "DELETE_REQUESTED" - The folder has been marked for deletion by the
+	// user.
+	LifecycleState string `json:"lifecycleState,omitempty"`
+
+	// Name: Output only. The resource name of the Folder.
+	// Its format is `folders/{folder_id}`, for example: "folders/1234".
+	Name string `json:"name,omitempty"`
+
+	// Parent: The Folder’s parent's resource name.
+	// Updates to the folder's parent must be performed via
+	// MoveFolder.
+	Parent string `json:"parent,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "CreateTime") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "CreateTime") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests.
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Folder) MarshalJSON() ([]byte, error) { + type NoMethod Folder + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FolderOperation: Metadata describing a long running folder operation +type FolderOperation struct { + // DestinationParent: The resource name of the folder or organization we + // are either creating + // the folder under or moving the folder to. + DestinationParent string `json:"destinationParent,omitempty"` + + // DisplayName: The display name of the folder. + DisplayName string `json:"displayName,omitempty"` + + // OperationType: The type of this operation. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Operation type not specified. + // "CREATE" - A create folder operation. + // "MOVE" - A move folder operation. + OperationType string `json:"operationType,omitempty"` + + // SourceParent: The resource name of the folder's parent. + // Only applicable when the operation_type is MOVE. + SourceParent string `json:"sourceParent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationParent") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationParent") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *FolderOperation) MarshalJSON() ([]byte, error) { + type NoMethod FolderOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// FolderOperationError: A classification of the Folder Operation error. +type FolderOperationError struct { + // ErrorMessageId: The type of operation error experienced. + // + // Possible values: + // "ERROR_TYPE_UNSPECIFIED" - The error type was unrecognized or + // unspecified. + // "ACTIVE_FOLDER_HEIGHT_VIOLATION" - The attempted action would + // violate the max folder depth constraint. + // "MAX_CHILD_FOLDERS_VIOLATION" - The attempted action would violate + // the max child folders constraint. + // "FOLDER_NAME_UNIQUENESS_VIOLATION" - The attempted action would + // violate the locally-unique folder + // display_name constraint. + // "RESOURCE_DELETED_VIOLATION" - The resource being moved has been + // deleted. + // "PARENT_DELETED_VIOLATION" - The resource a folder was being added + // to has been deleted. + // "CYCLE_INTRODUCED_VIOLATION" - The attempted action would introduce + // cycle in resource path. + // "FOLDER_BEING_MOVED_VIOLATION" - The attempted action would move a + // folder that is already being moved. + // "FOLDER_TO_DELETE_NON_EMPTY_VIOLATION" - The folder the caller is + // trying to delete contains active resources. + // "DELETED_FOLDER_HEIGHT_VIOLATION" - The attempted action would + // violate the max deleted folder depth + // constraint. + ErrorMessageId string `json:"errorMessageId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ErrorMessageId") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ErrorMessageId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *FolderOperationError) MarshalJSON() ([]byte, error) { + type NoMethod FolderOperationError + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GetIamPolicyRequest: Request message for `GetIamPolicy` method. +type GetIamPolicyRequest struct { +} + +// ListFoldersResponse: The ListFolders response message. +type ListFoldersResponse struct { + // Folders: A possibly paginated list of Folders that are direct + // descendants of + // the specified parent resource. + Folders []*Folder `json:"folders,omitempty"` + + // NextPageToken: A pagination token returned from a previous call to + // `ListFolders` + // that indicates from where listing should continue. + // This field is optional. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Folders") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Folders") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ListFoldersResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListFoldersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MoveFolderRequest: The MoveFolder request message. +type MoveFolderRequest struct { + // DestinationParent: The resource name of the Folder or Organization to + // reparent + // the folder under. + // Must be of the form `folders/{folder_id}` or + // `organizations/{org_id}`. + DestinationParent string `json:"destinationParent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationParent") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationParent") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MoveFolderRequest) MarshalJSON() ([]byte, error) { + type NoMethod MoveFolderRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Operation: This resource represents a long-running operation that is +// the result of a +// network API call. +type Operation struct { + // Done: If the value is `false`, it means the operation is still in + // progress. + // If `true`, the operation is completed, and either `error` or + // `response` is + // available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *Status `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. + // It typically + // contains progress information and common metadata such as create + // time. + // Some services might not provide such metadata. Any method that + // returns a + // long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that + // originally returns it. If you use the default HTTP mapping, + // the + // `name` should have the format of `operations/some/unique/name`. + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. + // If the original + // method returns no data on success, such as `Delete`, the response + // is + // `google.protobuf.Empty`. If the original method is + // standard + // `Get`/`Create`/`Update`, the response should be the resource. 
For + // other + // methods, the response should have the type `XxxResponse`, where + // `Xxx` + // is the original method name. For example, if the original method + // name + // is `TakeSnapshot()`, the inferred response type + // is + // `TakeSnapshotResponse`. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type NoMethod Operation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Policy: Defines an Identity and Access Management (IAM) policy. It is +// used to +// specify access control policies for Cloud Platform resources. +// +// +// A `Policy` consists of a list of `bindings`. A `Binding` binds a list +// of +// `members` to a `role`, where the members can be user accounts, Google +// groups, +// Google domains, and service accounts. A `role` is a named list of +// permissions +// defined by IAM. 
+// +// **Example** +// +// { +// "bindings": [ +// { +// "role": "roles/owner", +// "members": [ +// "user:mike@example.com", +// "group:admins@example.com", +// "domain:google.com", +// +// "serviceAccount:my-other-app@appspot.gserviceaccount.com", +// ] +// }, +// { +// "role": "roles/viewer", +// "members": ["user:sean@example.com"] +// } +// ] +// } +// +// For a description of IAM and its features, see the +// [IAM developer's guide](https://cloud.google.com/iam/docs). +type Policy struct { + // AuditConfigs: Specifies cloud audit logging configuration for this + // policy. + AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` + + // Bindings: Associates a list of `members` to a `role`. + // `bindings` with no members will result in an error. + Bindings []*Binding `json:"bindings,omitempty"` + + // Etag: `etag` is used for optimistic concurrency control as a way to + // help + // prevent simultaneous updates of a policy from overwriting each + // other. + // It is strongly suggested that systems make use of the `etag` in + // the + // read-modify-write cycle to perform policy updates in order to avoid + // race + // conditions: An `etag` is returned in the response to `getIamPolicy`, + // and + // systems are expected to put that etag in the request to + // `setIamPolicy` to + // ensure that their change will be applied to the same version of the + // policy. + // + // If no `etag` is provided in the call to `setIamPolicy`, then the + // existing + // policy is overwritten blindly. + Etag string `json:"etag,omitempty"` + + // Version: Deprecated. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AuditConfigs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuditConfigs") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ProjectCreationStatus: A status object which is used as the +// `metadata` field for the Operation +// returned by CreateProject. It provides insight for when significant +// phases of +// Project creation have completed. +type ProjectCreationStatus struct { + // CreateTime: Creation time of the project creation workflow. + CreateTime string `json:"createTime,omitempty"` + + // Gettable: True if the project can be retrieved using GetProject. No + // other operations + // on the project are guaranteed to work until the project creation + // is + // complete. + Gettable bool `json:"gettable,omitempty"` + + // Ready: True if the project creation process is complete. + Ready bool `json:"ready,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { + type NoMethod ProjectCreationStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SearchFoldersRequest: The request message for searching folders. +type SearchFoldersRequest struct { + // PageSize: The maximum number of folders to return in the + // response. + // This field is optional. + PageSize int64 `json:"pageSize,omitempty"` + + // PageToken: A pagination token returned from a previous call to + // `SearchFolders` + // that indicates from where search should continue. + // This field is optional. + PageToken string `json:"pageToken,omitempty"` + + // Query: Search criteria used to select the Folders to return. + // If no search criteria is specified then all accessible folders will + // be + // returned. + // + // Query expressions can be used to restrict results based upon + // displayName, + // lifecycleState and parent, where the operators `=`, `NOT`, `AND` and + // `OR` + // can be used along with the suffix wildcard symbol `*`. + // + // Some example queries are: + // + // |Query | Description| + // |----- | -----------| + // |displayName=Test* | Folders whose display name starts with + // "Test". + // |lifecycleState=ACTIVE | Folders whose lifecycleState is + // ACTIVE. + // |parent=folders/123 | Folders whose parent is + // "folders/123". 
+
+	// |parent=folders/123 AND lifecycleState=ACTIVE | Active folders whose
+	// parent is "folders/123".|
+	Query string `json:"query,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "PageSize") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "PageSize") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *SearchFoldersRequest) MarshalJSON() ([]byte, error) {
+	type NoMethod SearchFoldersRequest
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// SearchFoldersResponse: The response message for searching folders.
+type SearchFoldersResponse struct {
+	// Folders: A possibly paginated list of Folders that match the
+	// search query.
+	Folders []*Folder `json:"folders,omitempty"`
+
+	// NextPageToken: A pagination token returned from a previous call to
+	// `SearchFolders`
+	// that indicates from where searching should continue.
+	// This field is optional.
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Folders") to
+	// unconditionally include in API requests.
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Folders") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SearchFoldersResponse) MarshalJSON() ([]byte, error) { + type NoMethod SearchFoldersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SetIamPolicyRequest: Request message for `SetIamPolicy` method. +type SetIamPolicyRequest struct { + // Policy: REQUIRED: The complete policy to be applied to the + // `resource`. The size of + // the policy is limited to a few 10s of KB. An empty policy is a + // valid policy but certain Cloud Platform services (such as + // Projects) + // might reject them. + Policy *Policy `json:"policy,omitempty"` + + // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the + // policy to modify. Only + // the fields in the mask will be modified. If no mask is provided, + // the + // following default mask is used: + // paths: "bindings, etag" + // This field is only used by Cloud IAM. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Policy") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Policy") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod SetIamPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. The optional error details may contain +// arbitrary +// information about the error. 
There is a predefined set of error +// detail types +// in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. 
There is a + // common set of + // message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsRequest: Request message for `TestIamPermissions` +// method. +type TestIamPermissionsRequest struct { + // Permissions: The set of permissions to check for the `resource`. + // Permissions with + // wildcards (such as '*' or 'storage.*') are not allowed. For + // more + // information see + // [IAM + // Overview](https://cloud.google.com/iam/docs/overview#permissions). + Permissions []string `json:"permissions,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TestIamPermissionsResponse: Response message for `TestIamPermissions` +// method. +type TestIamPermissionsResponse struct { + // Permissions: A subset of `TestPermissionsRequest.permissions` that + // the caller is + // allowed. + Permissions []string `json:"permissions,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Permissions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Permissions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UndeleteFolderRequest: The UndeleteFolder request message. +type UndeleteFolderRequest struct { +} + +// method id "cloudresourcemanager.folders.create": + +type FoldersCreateCall struct { + s *Service + folder *Folder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a Folder in the resource hierarchy. +// Returns an Operation which can be used to track the progress of +// the +// folder creation workflow. +// Upon success the Operation.response field will be populated with +// the +// created Folder. +// +// In order to succeed, the addition of this new Folder must not +// violate +// the Folder naming, height or fanout constraints. +// +// + The Folder's display_name must be distinct from all other Folder's +// that +// share its parent. +// + The addition of the Folder must not cause the active Folder +// hierarchy +// to exceed a height of 4. Note, the full active + deleted Folder +// hierarchy +// is allowed to reach a height of 8; this provides additional headroom +// when +// moving folders that contain deleted folders. +// + The addition of the Folder must not cause the total number of +// Folders +// under its parent to exceed 100. +// +// If the operation fails due to a folder constraint violation, +// a PreconditionFailure explaining the violation will be returned. 
+// If the failure occurs synchronously then the PreconditionFailure +// will be returned via the Status.details field and if it +// occurs +// asynchronously then the PreconditionFailure will be returned +// via the the Operation.error field. +// +// The caller must have `resourcemanager.folders.create` permission on +// the +// identified parent. +func (r *FoldersService) Create(folder *Folder) *FoldersCreateCall { + c := &FoldersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.folder = folder + return c +} + +// Parent sets the optional parameter "parent": The resource name of the +// new Folder's parent. +// Must be of the form `folders/{folder_id}` or +// `organizations/{org_id}`. +func (c *FoldersCreateCall) Parent(parent string) *FoldersCreateCall { + c.urlParams_.Set("parent", parent) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersCreateCall) Fields(s ...googleapi.Field) *FoldersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersCreateCall) Context(ctx context.Context) *FoldersCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *FoldersCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/folders") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Folder in the resource hierarchy.\nReturns an Operation which can be used to track the progress of the\nfolder creation workflow.\nUpon success the Operation.response field will be populated with the\ncreated Folder.\n\nIn order to succeed, the addition of this new Folder must not violate\nthe Folder naming, height or fanout constraints.\n\n+ The Folder's display_name must be distinct from all other Folder's that\nshare its parent.\n+ The addition of the Folder must not cause the active Folder hierarchy\nto exceed a height of 4. 
Note, the full active + deleted Folder hierarchy\nis allowed to reach a height of 8; this provides additional headroom when\nmoving folders that contain deleted folders.\n+ The addition of the Folder must not cause the total number of Folders\nunder its parent to exceed 100.\n\nIf the operation fails due to a folder constraint violation,\na PreconditionFailure explaining the violation will be returned.\nIf the failure occurs synchronously then the PreconditionFailure\nwill be returned via the Status.details field and if it occurs\nasynchronously then the PreconditionFailure will be returned\nvia the the Operation.error field.\n\nThe caller must have `resourcemanager.folders.create` permission on the\nidentified parent.", + // "flatPath": "v2/folders", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.create", + // "parameterOrder": [], + // "parameters": { + // "parent": { + // "description": "The resource name of the new Folder's parent.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/folders", + // "request": { + // "$ref": "Folder" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.delete": + +type FoldersDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Requests deletion of a Folder. The Folder is moved into +// the +// DELETE_REQUESTED state +// immediately, and is deleted approximately 30 days later. This method +// may +// only be called on an empty Folder in the +// ACTIVE state, where a Folder is empty if +// it doesn't contain any Folders or Projects in the +// ACTIVE state. +// The caller must have `resourcemanager.folders.delete` permission on +// the +// identified folder. 
+func (r *FoldersService) Delete(name string) *FoldersDeleteCall { + c := &FoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersDeleteCall) Fields(s ...googleapi.Field) *FoldersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersDeleteCall) Context(ctx context.Context) *FoldersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.delete" call. +// Exactly one of *Folder or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) (*Folder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Folder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Requests deletion of a Folder. The Folder is moved into the\nDELETE_REQUESTED state\nimmediately, and is deleted approximately 30 days later. This method may\nonly be called on an empty Folder in the\nACTIVE state, where a Folder is empty if\nit doesn't contain any Folders or Projects in the\nACTIVE state.\nThe caller must have `resourcemanager.folders.delete` permission on the\nidentified folder.", + // "flatPath": "v2/folders/{foldersId}", + // "httpMethod": "DELETE", + // "id": "cloudresourcemanager.folders.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "the resource name of the Folder to be deleted.\nMust be of the form `folders/{folder_id}`.", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "Folder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.get": + +type FoldersGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ 
http.Header +} + +// Get: Retrieves a Folder identified by the supplied resource +// name. +// Valid Folder resource names have the format +// `folders/{folder_id}` +// (for example, `folders/1234`). +// The caller must have `resourcemanager.folders.get` permission on +// the +// identified folder. +func (r *FoldersService) Get(name string) *FoldersGetCall { + c := &FoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersGetCall) Fields(s ...googleapi.Field) *FoldersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *FoldersGetCall) IfNoneMatch(entityTag string) *FoldersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersGetCall) Context(ctx context.Context) *FoldersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *FoldersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.get" call. +// Exactly one of *Folder or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Folder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a Folder identified by the supplied resource name.\nValid Folder resource names have the format `folders/{folder_id}`\n(for example, `folders/1234`).\nThe caller must have `resourcemanager.folders.get` permission on the\nidentified folder.", + // "flatPath": "v2/folders/{foldersId}", + // "httpMethod": "GET", + // "id": "cloudresourcemanager.folders.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the Folder to retrieve.\nMust be of the form `folders/{folder_id}`.", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "Folder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.getIamPolicy": + +type FoldersGetIamPolicyCall struct { + s *Service + resource string + getiampolicyrequest *GetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a Folder. The +// returned policy may be +// empty if no such policy or resource exists. 
The `resource` field +// should +// be the Folder's resource name, e.g. "folders/1234". +// The caller must have `resourcemanager.folders.getIamPolicy` +// permission +// on the identified folder. +func (r *FoldersService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *FoldersGetIamPolicyCall { + c := &FoldersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.getiampolicyrequest = getiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersGetIamPolicyCall) Fields(s ...googleapi.Field) *FoldersGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersGetIamPolicyCall) Context(ctx context.Context) *FoldersGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:getIamPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a Folder. The returned policy may be\nempty if no such policy or resource exists. The `resource` field should\nbe the Folder's resource name, e.g. 
\"folders/1234\".\nThe caller must have `resourcemanager.folders.getIamPolicy` permission\non the identified folder.", + // "flatPath": "v2/folders/{foldersId}:getIamPolicy", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.getIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:getIamPolicy", + // "request": { + // "$ref": "GetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.list": + +type FoldersListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the Folders that are direct descendants of supplied +// parent resource. +// List provides a strongly consistent view of the Folders +// underneath +// the specified parent resource. +// List returns Folders sorted based upon the (ascending) lexical +// ordering +// of their display_name. +// The caller must have `resourcemanager.folders.list` permission on +// the +// identified parent. +func (r *FoldersService) List() *FoldersListCall { + c := &FoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of Folders to return in the response. +// This field is optional. 
+func (c *FoldersListCall) PageSize(pageSize int64) *FoldersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A pagination token +// returned from a previous call to `ListFolders` +// that indicates where this listing should continue from. +// This field is optional. +func (c *FoldersListCall) PageToken(pageToken string) *FoldersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Parent sets the optional parameter "parent": The resource name of the +// Organization or Folder whose Folders are +// being listed. +// Must be of the form `folders/{folder_id}` or +// `organizations/{org_id}`. +// Access to this method is controlled by checking +// the +// `resourcemanager.folders.list` permission on the `parent`. +func (c *FoldersListCall) Parent(parent string) *FoldersListCall { + c.urlParams_.Set("parent", parent) + return c +} + +// ShowDeleted sets the optional parameter "showDeleted": Controls +// whether Folders in the +// DELETE_REQUESTED +// state should be returned. Defaults to false. This field is optional. +func (c *FoldersListCall) ShowDeleted(showDeleted bool) *FoldersListCall { + c.urlParams_.Set("showDeleted", fmt.Sprint(showDeleted)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersListCall) Fields(s ...googleapi.Field) *FoldersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *FoldersListCall) IfNoneMatch(entityTag string) *FoldersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersListCall) Context(ctx context.Context) *FoldersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/folders") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.list" call. +// Exactly one of *ListFoldersResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListFoldersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*ListFoldersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListFoldersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the Folders that are direct descendants of supplied parent resource.\nList provides a strongly consistent view of the Folders underneath\nthe specified parent resource.\nList returns Folders sorted based upon the (ascending) lexical ordering\nof their display_name.\nThe caller must have `resourcemanager.folders.list` permission on the\nidentified parent.", + // "flatPath": "v2/folders", + // "httpMethod": "GET", + // "id": "cloudresourcemanager.folders.list", + // "parameterOrder": [], + // "parameters": { + // "pageSize": { + // "description": "The maximum number of Folders to return in the response.\nThis field is optional.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A pagination token returned from a previous call to `ListFolders`\nthat indicates where this listing should continue from.\nThis field is optional.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The resource name of the Organization or Folder whose Folders are\nbeing listed.\nMust be of the form `folders/{folder_id}` or `organizations/{org_id}`.\nAccess to this method is controlled by checking the\n`resourcemanager.folders.list` permission on the `parent`.", + // "location": "query", + // "type": "string" + // }, 
+ // "showDeleted": { + // "description": "Controls whether Folders in the\nDELETE_REQUESTED\nstate should be returned. Defaults to false. This field is optional.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "v2/folders", + // "response": { + // "$ref": "ListFoldersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *FoldersListCall) Pages(ctx context.Context, f func(*ListFoldersResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "cloudresourcemanager.folders.move": + +type FoldersMoveCall struct { + s *Service + name string + movefolderrequest *MoveFolderRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves a Folder under a new resource parent. +// Returns an Operation which can be used to track the progress of +// the +// folder move workflow. +// Upon success the Operation.response field will be populated with +// the +// moved Folder. +// Upon failure, a FolderOperationError categorizing the failure cause +// will +// be returned - if the failure occurs synchronously then +// the +// FolderOperationError will be returned via the Status.details +// field +// and if it occurs asynchronously then the FolderOperation will be +// returned +// via the the Operation.error field. 
+// In addition, the Operation.metadata field will be populated with +// a +// FolderOperation message as an aid to stateless clients. +// Folder moves will be rejected if they violate either the naming, +// height +// or fanout constraints described in the +// CreateFolder documentation. +// The caller must have `resourcemanager.folders.move` permission on +// the +// folder's current and proposed new parent. +func (r *FoldersService) Move(name string, movefolderrequest *MoveFolderRequest) *FoldersMoveCall { + c := &FoldersMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.movefolderrequest = movefolderrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersMoveCall) Fields(s ...googleapi.Field) *FoldersMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersMoveCall) Context(ctx context.Context) *FoldersMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.movefolderrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:move") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.move" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *FoldersMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves a Folder under a new resource parent.\nReturns an Operation which can be used to track the progress of the\nfolder move workflow.\nUpon success the Operation.response field will be populated with the\nmoved Folder.\nUpon failure, a FolderOperationError categorizing the failure cause will\nbe returned - if the failure occurs synchronously then the\nFolderOperationError will be returned via the Status.details field\nand if it occurs asynchronously then the FolderOperation will be returned\nvia the the Operation.error field.\nIn addition, the 
Operation.metadata field will be populated with a\nFolderOperation message as an aid to stateless clients.\nFolder moves will be rejected if they violate either the naming, height\nor fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.move` permission on the\nfolder's current and proposed new parent.", + // "flatPath": "v2/folders/{foldersId}:move", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.move", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the Folder to move.\nMust be of the form folders/{folder_id}", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}:move", + // "request": { + // "$ref": "MoveFolderRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.patch": + +type FoldersPatchCall struct { + s *Service + name string + folder *Folder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a Folder, changing its display_name. +// Changes to the folder display_name will be rejected if they violate +// either +// the display_name formatting rules or naming constraints described +// in +// the CreateFolder documentation. +// +// The Folder's display name must start and end with a letter or +// digit, +// may contain letters, digits, spaces, hyphens and underscores and can +// be +// no longer than 30 characters. This is captured by the regular +// expression: +// [\p{L}\p{N}]({\p{L}\p{N}_- ]{0,28}[\p{L}\p{N}])?. +// The caller must have `resourcemanager.folders.update` permission on +// the +// identified folder. 
+// +// If the update fails due to the unique name constraint then +// a +// PreconditionFailure explaining this violation will be returned +// in the Status.details field. +func (r *FoldersService) Patch(name string, folder *Folder) *FoldersPatchCall { + c := &FoldersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.folder = folder + return c +} + +// UpdateMask sets the optional parameter "updateMask": Fields to be +// updated. +// Only the `display_name` can be updated. +func (c *FoldersPatchCall) UpdateMask(updateMask string) *FoldersPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersPatchCall) Fields(s ...googleapi.Field) *FoldersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersPatchCall) Context(ctx context.Context) *FoldersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.patch" call. +// Exactly one of *Folder or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersPatchCall) Do(opts ...googleapi.CallOption) (*Folder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Folder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a Folder, changing its display_name.\nChanges to the folder display_name will be rejected if they violate either\nthe display_name formatting rules or naming constraints described in\nthe CreateFolder documentation.\n\nThe Folder's display name must start and end with a letter or digit,\nmay contain letters, digits, spaces, hyphens and underscores and can be\nno longer than 30 characters. 
This is captured by the regular expression:\n[\\p{L}\\p{N}]({\\p{L}\\p{N}_- ]{0,28}[\\p{L}\\p{N}])?.\nThe caller must have `resourcemanager.folders.update` permission on the\nidentified folder.\n\nIf the update fails due to the unique name constraint then a\nPreconditionFailure explaining this violation will be returned\nin the Status.details field.", + // "flatPath": "v2/folders/{foldersId}", + // "httpMethod": "PATCH", + // "id": "cloudresourcemanager.folders.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Output only. The resource name of the Folder.\nIts format is `folders/{folder_id}`, for example: \"folders/1234\".", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Fields to be updated.\nOnly the `display_name` can be updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "Folder" + // }, + // "response": { + // "$ref": "Folder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.search": + +type FoldersSearchCall struct { + s *Service + searchfoldersrequest *SearchFoldersRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Search: Search for folders that match specific filter +// criteria. +// Search provides an eventually consistent view of the folders a user +// has +// access to which meet the specified filter criteria. +// +// This will only return folders on which the caller has the +// permission `resourcemanager.folders.get`. 
+func (r *FoldersService) Search(searchfoldersrequest *SearchFoldersRequest) *FoldersSearchCall { + c := &FoldersSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.searchfoldersrequest = searchfoldersrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersSearchCall) Fields(s ...googleapi.Field) *FoldersSearchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersSearchCall) Context(ctx context.Context) *FoldersSearchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersSearchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersSearchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchfoldersrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/folders:search") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.search" call. +// Exactly one of *SearchFoldersResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *SearchFoldersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FoldersSearchCall) Do(opts ...googleapi.CallOption) (*SearchFoldersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &SearchFoldersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Search for folders that match specific filter criteria.\nSearch provides an eventually consistent view of the folders a user has\naccess to which meet the specified filter criteria.\n\nThis will only return folders on which the caller has the\npermission `resourcemanager.folders.get`.", + // "flatPath": "v2/folders:search", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.search", + // "parameterOrder": [], + // "parameters": {}, + // "path": "v2/folders:search", + // "request": { + // "$ref": "SearchFoldersRequest" + // }, + // "response": { + // "$ref": "SearchFoldersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *FoldersSearchCall) Pages(ctx context.Context, f func(*SearchFoldersResponse) error) error { + c.ctx_ = ctx + defer func(pt string) { c.searchfoldersrequest.PageToken = pt }(c.searchfoldersrequest.PageToken) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.searchfoldersrequest.PageToken = x.NextPageToken + } +} + +// method id "cloudresourcemanager.folders.setIamPolicy": + +type FoldersSetIamPolicyCall struct { + s *Service + resource string + setiampolicyrequest *SetIamPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on a Folder, replacing +// any existing policy. +// The `resource` field should be the Folder's resource name, +// e.g. +// "folders/1234". +// The caller must have `resourcemanager.folders.setIamPolicy` +// permission +// on the identified folder. +func (r *FoldersService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *FoldersSetIamPolicyCall { + c := &FoldersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.setiampolicyrequest = setiampolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersSetIamPolicyCall) Fields(s ...googleapi.Field) *FoldersSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *FoldersSetIamPolicyCall) Context(ctx context.Context) *FoldersSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *FoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on a Folder, replacing any existing policy.\nThe `resource` field should be the Folder's resource name, e.g.\n\"folders/1234\".\nThe caller must have `resourcemanager.folders.setIamPolicy` permission\non the identified folder.", + // "flatPath": "v2/folders/{foldersId}:setIamPolicy", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.setIamPolicy", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:setIamPolicy", + // "request": { + // "$ref": "SetIamPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.testIamPermissions": + +type FoldersTestIamPermissionsCall struct { + s *Service + resource string + testiampermissionsrequest *TestIamPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions 
that a caller has on the +// specified Folder. +// The `resource` field should be the Folder's resource name, +// e.g. "folders/1234". +// +// There are no permissions required for making this API call. +func (r *FoldersService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *FoldersTestIamPermissionsCall { + c := &FoldersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.resource = resource + c.testiampermissionsrequest = testiampermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersTestIamPermissionsCall) Fields(s ...googleapi.Field) *FoldersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersTestIamPermissionsCall) Context(ctx context.Context) *FoldersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *FoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified Folder.\nThe `resource` field should be the Folder's resource name,\ne.g. 
\"folders/1234\".\n\nThere are no permissions required for making this API call.", + // "flatPath": "v2/folders/{foldersId}:testIamPermissions", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.testIamPermissions", + // "parameterOrder": [ + // "resource" + // ], + // "parameters": { + // "resource": { + // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+resource}:testIamPermissions", + // "request": { + // "$ref": "TestIamPermissionsRequest" + // }, + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "cloudresourcemanager.folders.undelete": + +type FoldersUndeleteCall struct { + s *Service + name string + undeletefolderrequest *UndeleteFolderRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Undelete: Cancels the deletion request for a Folder. This method may +// only be +// called on a Folder in the +// DELETE_REQUESTED state. +// In order to succeed, the Folder's parent must be in the +// ACTIVE state. +// In addition, reintroducing the folder into the tree must not +// violate +// folder naming, height and fanout constraints described in +// the +// CreateFolder documentation. +// The caller must have `resourcemanager.folders.undelete` permission on +// the +// identified folder. +func (r *FoldersService) Undelete(name string, undeletefolderrequest *UndeleteFolderRequest) *FoldersUndeleteCall { + c := &FoldersUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.undeletefolderrequest = undeletefolderrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *FoldersUndeleteCall) Fields(s ...googleapi.Field) *FoldersUndeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *FoldersUndeleteCall) Context(ctx context.Context) *FoldersUndeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *FoldersUndeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *FoldersUndeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeletefolderrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:undelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "cloudresourcemanager.folders.undelete" call. +// Exactly one of *Folder or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Folder.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *FoldersUndeleteCall) Do(opts ...googleapi.CallOption) (*Folder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Folder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Cancels the deletion request for a Folder. This method may only be\ncalled on a Folder in the\nDELETE_REQUESTED state.\nIn order to succeed, the Folder's parent must be in the\nACTIVE state.\nIn addition, reintroducing the folder into the tree must not violate\nfolder naming, height and fanout constraints described in the\nCreateFolder documentation.\nThe caller must have `resourcemanager.folders.undelete` permission on the\nidentified folder.", + // "flatPath": "v2/folders/{foldersId}:undelete", + // "httpMethod": "POST", + // "id": "cloudresourcemanager.folders.undelete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name of the Folder to undelete.\nMust be of the form `folders/{folder_id}`.", + // "location": "path", + // "pattern": "^folders/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}:undelete", + // "request": { + // "$ref": "UndeleteFolderRequest" + // }, + // "response": { + // "$ref": "Folder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git 
a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json index 92a6ed3ff..3e305c8fb 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-api.json @@ -12,7 +12,7 @@ } }, "basePath": "", - "baseUrl": "https://content-cloudresourcemanager.googleapis.com/", + "baseUrl": "https://cloudresourcemanager.googleapis.com/", "batchPath": "batch", "canonicalName": "Cloud Resource Manager", "description": "The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.", @@ -430,8 +430,8 @@ } } }, - "revision": "20180305", - "rootUrl": "https://content-cloudresourcemanager.googleapis.com/", + "revision": "20180315", + "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "AuditConfig": { "description": "Specifies the audit configuration for a service.\nThe configuration determines which permission types are logged, and what\nidentities, if any, are exempted from logging.\nAn AuditConfig must have one or more AuditLogConfigs.\n\nIf there are AuditConfigs for both `allServices` and a specific service,\nthe union of the two AuditConfigs is used for that service: the log_types\nspecified in each AuditConfig are enabled, and the exempted_members in each\nAuditLogConfig are exempted.\n\nExample Policy with multiple AuditConfigs:\n\n {\n \"audit_configs\": [\n {\n \"service\": \"allServices\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n \"exempted_members\": [\n \"user:foo@gmail.com\"\n ]\n },\n {\n \"log_type\": \"DATA_WRITE\",\n },\n {\n \"log_type\": \"ADMIN_READ\",\n }\n ]\n },\n {\n \"service\": \"fooservice.googleapis.com\"\n \"audit_log_configs\": [\n {\n \"log_type\": \"DATA_READ\",\n },\n {\n \"log_type\": \"DATA_WRITE\",\n \"exempted_members\": 
[\n \"user:bar@gmail.com\"\n ]\n }\n ]\n }\n ]\n }\n\nFor fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ\nlogging. It also exempts foo@gmail.com from DATA_READ logging, and\nbar@gmail.com from DATA_WRITE logging.", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go index de0cf7dd9..950faed52 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v2beta1/cloudresourcemanager-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "cloudresourcemanager:v2beta1" const apiName = "cloudresourcemanager" const apiVersion = "v2beta1" -const basePath = "https://content-cloudresourcemanager.googleapis.com/" +const basePath = "https://cloudresourcemanager.googleapis.com/" // OAuth2 scopes used by this API. const ( diff --git a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json index a6b8c4b48..4c4c99233 100644 --- a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json +++ b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-api.json @@ -261,7 +261,7 @@ } } }, - "revision": "20180130", + "revision": "20180312", "rootUrl": "https://cloudtrace.googleapis.com/", "schemas": { "Empty": { @@ -337,7 +337,7 @@ "additionalProperties": { "type": "string" }, - "description": "Collection of labels associated with the span. Label keys must be less than\n128 bytes. Label values must be less than 16 kilobytes (10MB for\n`/stacktrace` values).\n\nSome predefined label keys exist, or you may create your own. 
When creating\nyour own, we recommend the following formats:\n\n* `/category/product/key` for agents of well-known products (e.g.\n `/db/mongodb/read_size`).\n* `short_host/path/key` for domain-specific keys (e.g.\n `foo.com/myproduct/bar`)\n\nPredefined labels include:\n\n* `/agent`\n* `/component`\n* `/error/message`\n* `/error/name`\n* `/http/client_city`\n* `/http/client_country`\n* `/http/client_protocol`\n* `/http/client_region`\n* `/http/host`\n* `/http/method`\n* `/http/redirected_url`\n* `/http/request/size`\n* `/http/response/size`\n* `/http/status_code`\n* `/http/url`\n* `/http/user_agent`\n* `/pid`\n* `/stacktrace`\n* `/tid`", + "description": "Collection of labels associated with the span. Label keys must be less than\n128 bytes. Label values must be less than 16 kilobytes (10MB for\n`/stacktrace` values).\n\nSome predefined label keys exist, or you may create your own. When creating\nyour own, we recommend the following formats:\n\n* `/category/product/key` for agents of well-known products (e.g.\n `/db/mongodb/read_size`).\n* `short_host/path/key` for domain-specific keys (e.g.\n `foo.com/myproduct/bar`)\n\nPredefined labels include:\n\n* `/agent`\n* `/component`\n* `/error/message`\n* `/error/name`\n* `/http/client_city`\n* `/http/client_country`\n* `/http/client_protocol`\n* `/http/client_region`\n* `/http/host`\n* `/http/method`\n* `/http/path`\n* `/http/redirected_url`\n* `/http/request/size`\n* `/http/response/size`\n* `/http/route`\n* `/http/status_code`\n* `/http/url`\n* `/http/user_agent`\n* `/pid`\n* `/stacktrace`\n* `/tid`", "type": "object" }, "name": { diff --git a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go index d692d2ab8..39bc34064 100644 --- a/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go +++ b/vendor/google.golang.org/api/cloudtrace/v1/cloudtrace-gen.go @@ -261,9 +261,11 @@ type TraceSpan struct { // * `/http/client_region` // * 
`/http/host` // * `/http/method` + // * `/http/path` // * `/http/redirected_url` // * `/http/request/size` // * `/http/response/size` + // * `/http/route` // * `/http/status_code` // * `/http/url` // * `/http/user_agent` diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index be9d128be..89f81d79d 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/N-OFBDyIh2rjB3cbDrFel2ULy4k\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/DTYBk6OTvNJPRyXmKG4YaDHGS0g\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -97,7 +97,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -189,7 +189,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -249,7 +249,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -428,7 +428,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -582,7 +582,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -761,7 +761,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1183,7 +1183,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1431,7 +1431,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1665,7 +1665,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1918,7 +1918,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2010,7 +2010,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2070,7 +2070,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2348,7 +2348,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2700,7 +2700,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2872,7 +2872,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3051,7 +3051,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3408,7 +3408,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3639,7 +3639,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3847,7 +3847,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3960,7 +3960,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4118,7 +4118,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4290,7 +4290,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4382,7 +4382,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4442,7 +4442,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4663,7 +4663,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4917,7 +4917,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5194,7 +5194,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5588,7 +5588,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5797,7 +5797,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6102,7 +6102,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6218,7 +6218,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6803,7 +6803,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6977,7 +6977,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7034,7 +7034,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7346,7 +7346,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7541,7 +7541,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8038,7 +8038,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8096,7 +8096,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9300,7 +9300,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9521,7 +9521,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9802,7 +9802,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10031,7 +10031,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10521,7 +10521,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10647,7 +10647,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10739,7 +10739,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10799,7 +10799,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11020,7 +11020,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11169,7 +11169,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11437,7 +11437,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11494,7 +11494,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11753,7 +11753,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11803,7 +11803,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12920,7 +12920,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13293,7 +13293,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13496,7 +13496,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13629,7 +13629,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13777,7 +13777,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14024,7 +14024,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14530,7 +14530,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14646,7 +14646,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15174,7 +15174,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15231,7 +15231,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15472,7 +15472,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15566,7 +15566,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15619,7 +15619,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15840,7 +15840,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16193,7 +16193,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16446,6 +16446,11 @@ "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", "location": "query", "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" } }, "path": "{project}/global/securityPolicies", @@ -16469,7 +16474,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16792,7 +16797,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17059,7 +17064,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17252,7 +17257,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17301,7 +17306,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17431,7 +17436,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17701,7 +17706,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17757,7 +17762,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -18102,7 +18107,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -18338,7 +18343,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -18590,7 +18595,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -18769,7 +18774,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -18972,7 +18977,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -19196,7 +19201,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -19559,7 +19564,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -19917,7 +19922,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -20089,7 +20094,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -20268,7 +20273,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -20568,7 +20573,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -20776,7 +20781,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -20955,7 +20960,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -21190,7 +21195,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -21284,7 +21289,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -21327,7 +21332,7 @@ } } }, - "revision": "20180220", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -24102,6 +24107,20 @@ "statusMessage": { "description": "[Output Only] An optional, human-readable explanation of the status.", "type": "string" + }, + "type": { + "description": "The type of commitment, which affects the discount rate and the eligible resources. Type LARGE_VM specifies a commitment that will only apply to large VMs. Type NORMAL specifies a commitment that applies to all other resources.", + "enum": [ + "LARGE_VM", + "NORMAL", + "TYPE_UNSPECIFIED" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" } }, "type": "object" @@ -31126,6 +31145,7 @@ "type": "boolean" }, "availabilityZone": { + "description": "[Deprecated] Replaced by edge_availability_domain.", "enum": [ "ZONE_1", "ZONE_2", @@ -31139,6 +31159,7 @@ "type": "string" }, "bandwidth": { + "description": "Provisioned bandwidth capacity for the interconnectAttachment. Can be set by the partner to update the customer's provisioned bandwidth. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED.", "enum": [ "BPS_100M", "BPS_10G", @@ -31189,7 +31210,7 @@ "type": "string" }, "edgeAvailabilityDomain": { - "description": "Desired availability domain for the attachment. Can only be specified when creating PARTNER-type InterconnectAttachments.\n\nFor improved reliability, customers should configure a pair of attachments with one per availability domain. The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. 
If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time. For improved reliability, customers should configure a pair of attachments with one per availability domain. The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", "enum": [ "AVAILABILITY_DOMAIN_1", "AVAILABILITY_DOMAIN_2", @@ -31250,16 +31271,17 @@ "type": "string" }, "pairingKey": { - "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. Opaque string identifying an PARTNER attachment. Of the form ?cloud-region/XXXXXX?.", + "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", "type": "string" }, "partnerAsn": { - "description": "[Output only for PARTNER. Input for PARTNER_PROVIDER. Not present for DEDICATED] BGP ASN of the Partner. A layer 3 Partner should supply this if they configured BGP on behalf of the customer.", + "description": "Optional BGP ASN for the router that should be supplied by a layer 3 Partner if they configured BGP on behalf of the customer. Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", "format": "int64", "type": "string" }, "partnerMetadata": { - "$ref": "InterconnectAttachmentPartnerMetadata" + "$ref": "InterconnectAttachmentPartnerMetadata", + "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." 
}, "privateInterconnectInfo": { "$ref": "InterconnectAttachmentPrivateInfo", @@ -31278,7 +31300,7 @@ "type": "string" }, "state": { - "description": "[Output Only] The current state of whether or not this interconnect attachment is functional.", + "description": "[Output Only] The current state of this attachment's functionality.", "enum": [ "ACTIVE", "DEFUNCT", @@ -31545,7 +31567,7 @@ "type": "object" }, "InterconnectAttachmentPartnerMetadata": { - "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments. Only mutable for PARTNER_PROVIDER type, output-only for PARTNER, not available for DEDICATED.", + "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments.", "id": "InterconnectAttachmentPartnerMetadata", "properties": { "interconnectName": { diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 9551eba53..25ab2eaf7 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -5068,6 +5068,17 @@ type Commitment struct { // of the status. StatusMessage string `json:"statusMessage,omitempty"` + // Type: The type of commitment, which affects the discount rate and the + // eligible resources. Type LARGE_VM specifies a commitment that will + // only apply to large VMs. Type NORMAL specifies a commitment that + // applies to all other resources. + // + // Possible values: + // "LARGE_VM" + // "NORMAL" + // "TYPE_UNSPECIFIED" + Type string `json:"type,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -15257,12 +15268,19 @@ type InterconnectAttachment struct { // Not present for PARTNER_PROVIDER. AdminEnabled bool `json:"adminEnabled,omitempty"` + // AvailabilityZone: [Deprecated] Replaced by edge_availability_domain. + // // Possible values: // "ZONE_1" // "ZONE_2" // "ZONE_ANY" AvailabilityZone string `json:"availabilityZone,omitempty"` + // Bandwidth: Provisioned bandwidth capacity for the + // interconnectAttachment. Can be set by the partner to update the + // customer's provisioned bandwidth. Output only for for PARTNER type, + // mutable for PARTNER_PROVIDER, not available for DEDICATED. + // // Possible values: // "BPS_100M" // "BPS_10G" @@ -15304,10 +15322,8 @@ type InterconnectAttachment struct { Description string `json:"description,omitempty"` // EdgeAvailabilityDomain: Desired availability domain for the - // attachment. Can only be specified when creating PARTNER-type - // InterconnectAttachments. - // - // For improved reliability, customers should configure a pair of + // attachment. Only available for type PARTNER, at creation time. For + // improved reliability, customers should configure a pair of // attachments with one per availability domain. The selected // availability domain will be provided to the Partner via the pairing // key so that the provisioned circuit will lie in the specified domain. @@ -15370,15 +15386,20 @@ type InterconnectAttachment struct { OperationalStatus string `json:"operationalStatus,omitempty"` // PairingKey: [Output only for type PARTNER. Input only for - // PARTNER_PROVIDER. Not present for DEDICATED]. Opaque string - // identifying an PARTNER attachment. Of the form ?cloud-region/XXXXXX?. + // PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier + // of an PARTNER attachment used to initiate provisioning with a + // selected partner. Of the form "XXXXX/region/domain" PairingKey string `json:"pairingKey,omitempty"` - // PartnerAsn: [Output only for PARTNER. 
Input for PARTNER_PROVIDER. Not - // present for DEDICATED] BGP ASN of the Partner. A layer 3 Partner - // should supply this if they configured BGP on behalf of the customer. + // PartnerAsn: Optional BGP ASN for the router that should be supplied + // by a layer 3 Partner if they configured BGP on behalf of the + // customer. Output only for PARTNER type, input only for + // PARTNER_PROVIDER, not available for DEDICATED. PartnerAsn int64 `json:"partnerAsn,omitempty,string"` + // PartnerMetadata: Informational metadata about Partner attachments + // from Partners to display to customers. Output only for for PARTNER + // type, mutable for PARTNER_PROVIDER, not available for DEDICATED. PartnerMetadata *InterconnectAttachmentPartnerMetadata `json:"partnerMetadata,omitempty"` // PrivateInterconnectInfo: [Output Only] Information specific to an @@ -15401,8 +15422,8 @@ type InterconnectAttachment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // State: [Output Only] The current state of whether or not this - // interconnect attachment is functional. + // State: [Output Only] The current state of this attachment's + // functionality. // // Possible values: // "ACTIVE" @@ -15770,8 +15791,7 @@ func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { // InterconnectAttachmentPartnerMetadata: Informational metadata about // Partner attachments from Partners to display to customers. These // fields are propagated from PARTNER_PROVIDER attachments to their -// corresponding PARTNER attachments. Only mutable for PARTNER_PROVIDER -// type, output-only for PARTNER, not available for DEDICATED. +// corresponding PARTNER attachments. type InterconnectAttachmentPartnerMetadata struct { // InterconnectName: Plain text name of the Interconnect this attachment // is connected to, as displayed in the Partner?s portal. 
For instance @@ -32314,32 +32334,28 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. 
You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -32483,7 +32499,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -32731,32 +32747,28 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. 
+// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -32902,7 +32914,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -32992,32 +33004,28 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -33161,7 +33169,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -33754,32 +33762,28 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AddressesListCall) Filter(filter string) *AddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -33925,7 +33929,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -34355,32 +34359,28 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -34524,7 +34524,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -35114,32 +35114,28 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -35285,7 +35281,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -36864,32 +36860,28 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c @@ -37033,7 +37025,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -37924,32 +37916,28 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -38093,7 +38081,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -38970,32 +38958,28 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -39139,7 +39123,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -40038,32 +40022,28 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -40207,7 +40187,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -40457,32 +40437,28 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -40628,7 +40604,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -40718,32 +40694,28 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -40887,7 +40859,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -41853,32 +41825,28 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c @@ -42024,7 +41992,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -43270,32 +43238,28 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c @@ -43439,7 +43403,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -44017,32 +43981,28 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -44186,7 +44146,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -44779,32 +44739,28 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -44950,7 +44906,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -46223,32 +46179,28 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -46392,7 +46344,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -47246,32 +47198,28 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -47415,7 +47363,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -48138,32 +48086,28 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -48307,7 +48251,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -48653,32 +48597,28 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -48822,7 +48762,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -49377,32 +49317,28 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -49546,7 +49482,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -50119,32 +50055,28 @@ func (r *HostTypesService) AggregatedList(project string) *HostTypesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HostTypesAggregatedListCall) Filter(filter string) *HostTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -50288,7 +50220,7 @@ func (c *HostTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostTyp // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -50536,32 +50468,28 @@ func (r *HostTypesService) List(project string, zone string) *HostTypesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *HostTypesListCall) Filter(filter string) *HostTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -50707,7 +50635,7 @@ func (c *HostTypesListCall) Do(opts ...googleapi.CallOption) (*HostTypeList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -50796,32 +50724,28 @@ func (r *HostsService) AggregatedList(project string) *HostsAggregatedListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *HostsAggregatedListCall) Filter(filter string) *HostsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -50965,7 +50889,7 @@ func (c *HostsAggregatedListCall) Do(opts ...googleapi.CallOption) (*HostAggrega // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -51717,32 +51641,28 @@ func (r *HostsService) List(project string, zone string) *HostsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *HostsListCall) Filter(filter string) *HostsListCall { c.urlParams_.Set("filter", filter) return c @@ -51888,7 +51808,7 @@ func (c *HostsListCall) Do(opts ...googleapi.CallOption) (*HostList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -52771,32 +52691,28 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -52940,7 +52856,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -53988,32 +53904,28 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -54157,7 +54069,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -55705,32 +55617,28 @@ func (r *ImagesService) List(project string) *ImagesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c @@ -55874,7 +55782,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -56594,32 +56502,28 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -56764,7 +56668,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -57868,32 +57772,28 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -58039,7 +57939,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -58348,32 +58248,28 @@ func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zo return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c @@ -58510,7 +58406,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -60614,32 +60510,28 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -60783,7 +60675,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -61371,32 +61263,28 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -61542,7 +61430,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -61635,32 +61523,28 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -61800,7 +61684,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -62901,32 +62785,28 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c @@ -63070,7 +62950,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -63675,32 +63555,28 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -63844,7 +63720,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -65561,32 +65437,28 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -65732,7 +65604,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -65826,32 +65698,28 @@ func (r *InstancesService) ListReferrers(project string, zone string, instance s return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { c.urlParams_.Set("filter", filter) return c @@ -65999,7 +65867,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -70234,32 +70102,28 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -70404,7 +70268,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -71156,32 +71020,28 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -71327,7 +71187,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -72252,32 +72112,28 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c @@ -72421,7 +72277,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -73276,32 +73132,28 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c @@ -73445,7 +73297,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -75371,32 +75223,28 @@ func (r *LicensesService) List(project string) *LicensesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *LicensesListCall) Filter(filter string) *LicensesListCall { c.urlParams_.Set("filter", filter) return c @@ -75540,7 +75388,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -75918,32 +75766,28 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -76087,7 +75931,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -76337,32 +76181,28 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -76508,7 +76348,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -76597,32 +76437,28 @@ func (r *MaintenancePoliciesService) AggregatedList(project string) *Maintenance return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *MaintenancePoliciesAggregatedListCall) Filter(filter string) *MaintenancePoliciesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -76766,7 +76602,7 @@ func (c *MaintenancePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -77517,32 +77353,28 @@ func (r *MaintenancePoliciesService) List(project string, region string) *Mainte return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *MaintenancePoliciesListCall) Filter(filter string) *MaintenancePoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -77688,7 +77520,7 @@ func (c *MaintenancePoliciesListCall) Do(opts ...googleapi.CallOption) (*Mainten // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -78095,32 +77927,28 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -78265,7 +78093,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -79213,32 +79041,28 @@ func (r *NetworkEndpointGroupsService) List(project string, zone string) *Networ return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -79384,7 +79208,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -79478,32 +79302,28 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("filter", filter) return c @@ -79645,7 +79465,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -80548,32 +80368,28 @@ func (r *NetworksService) List(project string) *NetworksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -80717,7 +80533,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -80801,32 +80617,28 @@ func (r *NetworksService) ListIpOwners(project string, network string) *Networks return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *NetworksListIpOwnersCall) Filter(filter string) *NetworksListIpOwnersCall { c.urlParams_.Set("filter", filter) return c @@ -81007,7 +80819,7 @@ func (c *NetworksListIpOwnersCall) Do(opts ...googleapi.CallOption) (*IpOwnerLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -85514,32 +85326,28 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -85685,7 +85493,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -86970,32 +86778,28 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -87141,7 +86945,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -87759,32 +87563,28 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -87928,7 +87728,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -88347,32 +88147,28 @@ func (r *RegionCommitmentsService) List(project string, region string) *RegionCo return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -88518,7 +88314,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -88932,32 +88728,28 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -89103,7 +88895,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -89904,32 +89696,28 @@ func (r *RegionDisksService) List(project string, region string) *RegionDisksLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { c.urlParams_.Set("filter", filter) return c @@ -90075,7 +89863,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -91897,32 +91685,28 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -92068,7 +91852,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -92375,32 +92159,28 @@ func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project stri return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c @@ -92537,7 +92317,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -94404,32 +94184,28 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -94575,7 +94351,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -94671,32 +94447,28 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -94837,7 +94609,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -95560,32 +95332,28 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -95731,7 +95499,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -95975,32 +95743,28 @@ func (r *RegionsService) List(project string) *RegionsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c @@ -96144,7 +95908,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -96226,32 +95990,28 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -96395,7 +96155,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -97148,32 +96908,28 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c @@ -97319,7 +97075,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -98569,32 +98325,28 @@ func (r *RoutesService) List(project string) *RoutesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c @@ -98738,7 +98490,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -99623,6 +99375,13 @@ func (c *SecurityPoliciesInsertCall) RequestId(requestId string) *SecurityPolici return c } +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *SecurityPoliciesInsertCall) ValidateOnly(validateOnly bool) *SecurityPoliciesInsertCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -99727,6 +99486,11 @@ func (c *SecurityPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operatio // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed.\n\nFor example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" // } // }, // "path": "{project}/global/securityPolicies", @@ -99763,32 +99527,28 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -99932,7 +99692,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -101127,32 +100887,28 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c @@ -101296,7 +101052,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -102293,32 +102049,28 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -102462,7 +102214,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -103165,32 +102917,28 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -103334,7 +103082,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -103417,32 +103165,28 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c @@ -103588,7 +103332,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -103967,32 +103711,28 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -104136,7 +103876,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -105071,32 +104811,28 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). 
The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -105242,7 +104978,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -105331,32 +105067,28 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { c.urlParams_.Set("filter", filter) return c @@ -105500,7 +105232,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -106744,32 +106476,28 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -106913,7 +106641,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -107787,32 +107515,28 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -107956,7 +107680,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -108869,32 +108593,28 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -109038,7 +108758,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -109632,32 +109352,28 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -109803,7 +109519,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -110416,32 +110132,28 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -110585,7 +110297,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -111339,32 +111051,28 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c @@ -111510,7 +111218,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -112790,32 +112498,28 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -112959,7 +112663,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -114344,32 +114048,28 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -114513,7 +114213,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -115083,32 +114783,28 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -115252,7 +114948,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -115842,32 +115538,28 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -116013,7 +115705,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -117091,32 +116783,28 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -117260,7 +116948,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -117984,32 +117672,28 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -118153,7 +117837,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -118743,32 +118427,28 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { c.urlParams_.Set("filter", filter) return c @@ -118914,7 +118594,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -119631,32 +119311,28 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -119802,7 +119478,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -120046,32 +119722,28 @@ func (r *ZonesService) List(project string) *ZonesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ZonesListCall) Filter(filter string) *ZonesListCall { c.urlParams_.Set("filter", filter) return c @@ -120215,7 +119887,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index 690984a15..55bd6c251 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/JrYQUQoo567cA8hFPPaVrcEEzOY\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/rbLFk9Clb6vab1hqkKH1aK-_W6I\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -97,7 +97,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -189,7 +189,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -249,7 +249,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -428,7 +428,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -582,7 +582,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -761,7 +761,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1149,7 +1149,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1324,7 +1324,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1558,7 +1558,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1770,7 +1770,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1862,7 +1862,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1922,7 +1922,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2158,7 +2158,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2466,7 +2466,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2638,7 +2638,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2817,7 +2817,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3125,7 +3125,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3356,7 +3356,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3523,7 +3523,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3636,7 +3636,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3794,7 +3794,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4071,7 +4071,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4348,7 +4348,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4708,7 +4708,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4881,7 +4881,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5102,7 +5102,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5693,7 +5693,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5867,7 +5867,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5924,7 +5924,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6236,7 +6236,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6382,7 +6382,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6790,7 +6790,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6848,7 +6848,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7453,6 +7453,47 @@ "https://www.googleapis.com/auth/compute" ] }, + "simulateMaintenanceEvent": { + "description": "Simulates a maintenance event on the instance.", + "httpMethod": "POST", + "id": "compute.instances.simulateMaintenanceEvent", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "start": { "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", "httpMethod": "POST", @@ -7764,7 +7805,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7943,7 +7984,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8180,7 +8221,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8338,7 +8379,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8651,7 +8692,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8704,7 +8745,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8796,7 +8837,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9002,7 +9043,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9782,7 +9823,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10155,7 +10196,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10358,7 +10399,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10491,7 +10532,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10594,7 +10635,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10837,7 +10878,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11259,7 +11300,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11798,7 +11839,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11855,7 +11896,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12096,7 +12137,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12190,7 +12231,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12243,7 +12284,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12464,7 +12505,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12817,7 +12858,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13088,7 +13129,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13372,7 +13413,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13603,7 +13644,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13796,7 +13837,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13845,7 +13886,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13975,7 +14016,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14245,7 +14286,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14301,7 +14342,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14646,7 +14687,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -14882,7 +14923,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15134,7 +15175,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15313,7 +15354,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15516,7 +15557,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -15740,7 +15781,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16103,7 +16144,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16461,7 +16502,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16596,7 +16637,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -16775,7 +16816,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17075,7 +17116,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17283,7 +17324,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17462,7 +17503,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17697,7 +17738,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17791,7 +17832,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -17834,7 +17875,7 @@ } } }, - "revision": "20180220", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -21929,6 +21970,10 @@ ], "type": "string" }, + "disabled": { + "description": "Denotes whether the firewall rule is disabled, i.e not applied to the network it is associated with. 
When set to true, the firewall rule is not enforced and the network behaves as if it did not exist. If this is unspecified, the firewall rule will be enabled.", + "type": "boolean" + }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64", @@ -25954,9 +25999,11 @@ "description": "Type of interconnect. Note that \"IT_PRIVATE\" has been deprecated in favor of \"DEDICATED\"", "enum": [ "DEDICATED", - "IT_PRIVATE" + "IT_PRIVATE", + "PARTNER" ], "enumDescriptions": [ + "", "", "" ], @@ -26036,6 +26083,18 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "state": { + "description": "[Output Only] The current state of whether or not this Interconnect is functional.", + "enum": [ + "ACTIVE", + "UNPROVISIONED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" } }, "type": "object" @@ -26048,6 +26107,34 @@ "description": "Determines whether this Attachment will carry packets. Not present for PARTNER_PROVIDER.", "type": "boolean" }, + "bandwidth": { + "description": "Provisioned bandwidth capacity for the interconnectAttachment. Can be set by the partner to update the customer's provisioned bandwidth. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED.", + "enum": [ + "BPS_100M", + "BPS_10G", + "BPS_1G", + "BPS_200M", + "BPS_2G", + "BPS_300M", + "BPS_400M", + "BPS_500M", + "BPS_50M", + "BPS_5G" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, "candidateSubnets": { "description": "Up to 16 candidate prefixes that can be used to restrict the allocation of cloudRouterIpAddress and customerRouterIpAddress for this attachment. All prefixes must be within link-local address space (169.254.0.0/16) and must be /29 or shorter (/28, /27, etc). 
Google will attempt to select an unused /29 from the supplied candidate prefix(es). The request will fail if all possible /29s are in use on Google?s edge. If not supplied, Google will randomly select an unused /29 from all of link-local space.", "items": { @@ -26071,6 +26158,20 @@ "description": "An optional description of this resource.", "type": "string" }, + "edgeAvailabilityDomain": { + "description": "Desired availability domain for the attachment. Only available for type PARTNER, at creation time. For improved reliability, customers should configure a pair of attachments with one per availability domain. The selected availability domain will be provided to the Partner via the pairing key so that the provisioned circuit will lie in the specified domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.", + "enum": [ + "AVAILABILITY_DOMAIN_1", + "AVAILABILITY_DOMAIN_2", + "AVAILABILITY_DOMAIN_ANY" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, "googleReferenceId": { "description": "[Output Only] Google reference ID, to be used when raising support tickets with Google or otherwise to debug backend connectivity issues.", "type": "string" @@ -26118,6 +26219,19 @@ ], "type": "string" }, + "pairingKey": { + "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", + "type": "string" + }, + "partnerAsn": { + "description": "Optional BGP ASN for the router that should be supplied by a layer 3 Partner if they configured BGP on behalf of the customer. 
Output only for PARTNER type, input only for PARTNER_PROVIDER, not available for DEDICATED.", + "format": "int64", + "type": "string" + }, + "partnerMetadata": { + "$ref": "InterconnectAttachmentPartnerMetadata", + "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." + }, "privateInterconnectInfo": { "$ref": "InterconnectAttachmentPrivateInfo", "description": "[Output Only] Information specific to an InterconnectAttachment. This property is populated if the interconnect that this is attached to is of type DEDICATED." @@ -26134,11 +26248,37 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, - "type": { + "state": { + "description": "[Output Only] The current state of this attachment's functionality.", "enum": [ - "DEDICATED" + "ACTIVE", + "DEFUNCT", + "PARTNER_REQUEST_RECEIVED", + "PENDING_CUSTOMER", + "PENDING_PARTNER", + "STATE_UNSPECIFIED", + "UNPROVISIONED" ], "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "type": { + "enum": [ + "DEDICATED", + "PARTNER", + "PARTNER_PROVIDER" + ], + "enumDescriptions": [ + "", + "", "" ], "type": "string" @@ -26375,6 +26515,25 @@ }, "type": "object" }, + "InterconnectAttachmentPartnerMetadata": { + "description": "Informational metadata about Partner attachments from Partners to display to customers. These fields are propagated from PARTNER_PROVIDER attachments to their corresponding PARTNER attachments.", + "id": "InterconnectAttachmentPartnerMetadata", + "properties": { + "interconnectName": { + "description": "Plain text name of the Interconnect this attachment is connected to, as displayed in the Partner?s portal. For instance ?Chicago 1?. 
This value may be validated to match approved Partner values.", + "type": "string" + }, + "partnerName": { + "description": "Plain text name of the Partner providing this attachment. This value may be validated to match approved Partner values.", + "type": "string" + }, + "portalUrl": { + "description": "URL of the Partner?s portal for this Attachment. Partners may customise this to be a deep-link to the specific resource on the Partner portal. This value may be validated to match approved Partner values.", + "type": "string" + } + }, + "type": "object" + }, "InterconnectAttachmentPrivateInfo": { "description": "Information for an interconnect attachment when this belongs to an interconnect of type DEDICATED.", "id": "InterconnectAttachmentPrivateInfo", @@ -28953,6 +29112,7 @@ "NETWORKS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", + "NVIDIA_V100_GPUS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", "PREEMPTIBLE_NVIDIA_K80_GPUS", @@ -29024,6 +29184,7 @@ "", "", "", + "", "" ], "type": "string" @@ -32088,6 +32249,10 @@ "description": "An optional description of this resource. Provide this property when you create the resource. This field can be set only at resource creation time.", "type": "string" }, + "enableFlowLogs": { + "description": "Whether to enable flow logging for this subnetwork.", + "type": "boolean" + }, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a Subnetwork. 
An up-to-date fingerprint must be provided in order to update the Subnetwork.", "format": "byte", diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 993cc7a80..b07e16205 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -6787,6 +6787,12 @@ type Firewall struct { // "INGRESS" Direction string `json:"direction,omitempty"` + // Disabled: Denotes whether the firewall rule is disabled, i.e not + // applied to the network it is associated with. When set to true, the + // firewall rule is not enforced and the network behaves as if it did + // not exist. If this is unspecified, the firewall rule will be enabled. + Disabled bool `json:"disabled,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -12752,6 +12758,7 @@ type Interconnect struct { // Possible values: // "DEDICATED" // "IT_PRIVATE" + // "PARTNER" InterconnectType string `json:"interconnectType,omitempty"` // Kind: [Output Only] Type of the resource. Always compute#interconnect @@ -12827,6 +12834,14 @@ type Interconnect struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] The current state of whether or not this + // Interconnect is functional. + // + // Possible values: + // "ACTIVE" + // "UNPROVISIONED" + State string `json:"state,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -12863,6 +12878,24 @@ type InterconnectAttachment struct { // Not present for PARTNER_PROVIDER. AdminEnabled bool `json:"adminEnabled,omitempty"` + // Bandwidth: Provisioned bandwidth capacity for the + // interconnectAttachment. 
Can be set by the partner to update the + // customer's provisioned bandwidth. Output only for for PARTNER type, + // mutable for PARTNER_PROVIDER, not available for DEDICATED. + // + // Possible values: + // "BPS_100M" + // "BPS_10G" + // "BPS_1G" + // "BPS_200M" + // "BPS_2G" + // "BPS_300M" + // "BPS_400M" + // "BPS_500M" + // "BPS_50M" + // "BPS_5G" + Bandwidth string `json:"bandwidth,omitempty"` + // CandidateSubnets: Up to 16 candidate prefixes that can be used to // restrict the allocation of cloudRouterIpAddress and // customerRouterIpAddress for this attachment. All prefixes must be @@ -12890,6 +12923,20 @@ type InterconnectAttachment struct { // Description: An optional description of this resource. Description string `json:"description,omitempty"` + // EdgeAvailabilityDomain: Desired availability domain for the + // attachment. Only available for type PARTNER, at creation time. For + // improved reliability, customers should configure a pair of + // attachments with one per availability domain. The selected + // availability domain will be provided to the Partner via the pairing + // key so that the provisioned circuit will lie in the specified domain. + // If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. + // + // Possible values: + // "AVAILABILITY_DOMAIN_1" + // "AVAILABILITY_DOMAIN_2" + // "AVAILABILITY_DOMAIN_ANY" + EdgeAvailabilityDomain string `json:"edgeAvailabilityDomain,omitempty"` + // GoogleReferenceId: [Output Only] Google reference ID, to be used when // raising support tickets with Google or otherwise to debug backend // connectivity issues. @@ -12940,6 +12987,23 @@ type InterconnectAttachment struct { // "OS_UNPROVISIONED" OperationalStatus string `json:"operationalStatus,omitempty"` + // PairingKey: [Output only for type PARTNER. Input only for + // PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier + // of an PARTNER attachment used to initiate provisioning with a + // selected partner. 
Of the form "XXXXX/region/domain" + PairingKey string `json:"pairingKey,omitempty"` + + // PartnerAsn: Optional BGP ASN for the router that should be supplied + // by a layer 3 Partner if they configured BGP on behalf of the + // customer. Output only for PARTNER type, input only for + // PARTNER_PROVIDER, not available for DEDICATED. + PartnerAsn int64 `json:"partnerAsn,omitempty,string"` + + // PartnerMetadata: Informational metadata about Partner attachments + // from Partners to display to customers. Output only for for PARTNER + // type, mutable for PARTNER_PROVIDER, not available for DEDICATED. + PartnerMetadata *InterconnectAttachmentPartnerMetadata `json:"partnerMetadata,omitempty"` + // PrivateInterconnectInfo: [Output Only] Information specific to an // InterconnectAttachment. This property is populated if the // interconnect that this is attached to is of type DEDICATED. @@ -12960,8 +13024,23 @@ type InterconnectAttachment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // State: [Output Only] The current state of this attachment's + // functionality. + // + // Possible values: + // "ACTIVE" + // "DEFUNCT" + // "PARTNER_REQUEST_RECEIVED" + // "PENDING_CUSTOMER" + // "PENDING_PARTNER" + // "STATE_UNSPECIFIED" + // "UNPROVISIONED" + State string `json:"state,omitempty"` + // Possible values: // "DEDICATED" + // "PARTNER" + // "PARTNER_PROVIDER" Type string `json:"type,omitempty"` // VlanTag8021q: Available only for DEDICATED and PARTNER_PROVIDER. @@ -13311,6 +13390,52 @@ func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectAttachmentPartnerMetadata: Informational metadata about +// Partner attachments from Partners to display to customers. These +// fields are propagated from PARTNER_PROVIDER attachments to their +// corresponding PARTNER attachments. 
+type InterconnectAttachmentPartnerMetadata struct { + // InterconnectName: Plain text name of the Interconnect this attachment + // is connected to, as displayed in the Partner?s portal. For instance + // ?Chicago 1?. This value may be validated to match approved Partner + // values. + InterconnectName string `json:"interconnectName,omitempty"` + + // PartnerName: Plain text name of the Partner providing this + // attachment. This value may be validated to match approved Partner + // values. + PartnerName string `json:"partnerName,omitempty"` + + // PortalUrl: URL of the Partner?s portal for this Attachment. Partners + // may customise this to be a deep-link to the specific resource on the + // Partner portal. This value may be validated to match approved Partner + // values. + PortalUrl string `json:"portalUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InterconnectName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPartnerMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectAttachmentPrivateInfo: Information for an interconnect // attachment when this belongs to an interconnect of type DEDICATED. type InterconnectAttachmentPrivateInfo struct { @@ -17106,6 +17231,7 @@ type Quota struct { // "NETWORKS" // "NVIDIA_K80_GPUS" // "NVIDIA_P100_GPUS" + // "NVIDIA_V100_GPUS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" // "PREEMPTIBLE_NVIDIA_K80_GPUS" @@ -21585,6 +21711,9 @@ type Subnetwork struct { // resource creation time. Description string `json:"description,omitempty"` + // EnableFlowLogs: Whether to enable flow logging for this subnetwork. + EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This // field will be ignored when inserting a Subnetwork. An up-to-date @@ -27109,32 +27238,28 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). 
The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -27278,7 +27403,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -27526,32 +27651,28 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -27697,7 +27818,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -27787,32 +27908,28 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -27956,7 +28073,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -28549,32 +28666,28 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AddressesListCall) Filter(filter string) *AddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -28720,7 +28833,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -29150,32 +29263,28 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -29319,7 +29428,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -29909,32 +30018,28 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -30080,7 +30185,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -31507,32 +31612,28 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c @@ -31676,7 +31777,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -32272,32 +32373,28 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -32441,7 +32538,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -33318,32 +33415,28 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -33487,7 +33580,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -34238,32 +34331,28 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -34407,7 +34496,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -34657,32 +34746,28 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -34828,7 +34913,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -34918,32 +35003,28 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -35087,7 +35168,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -35890,32 +35971,28 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c @@ -36061,7 +36138,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -37149,32 +37226,28 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c @@ -37318,7 +37391,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -37896,32 +37969,28 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -38065,7 +38134,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -38658,32 +38727,28 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -38829,7 +38894,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -39918,32 +39983,28 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -40087,7 +40148,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -40941,32 +41002,28 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -41110,7 +41167,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -41660,32 +41717,28 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -41829,7 +41882,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -42175,32 +42228,28 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -42344,7 +42393,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -42899,32 +42948,28 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -43068,7 +43113,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -44118,32 +44163,28 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -44287,7 +44328,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -45335,32 +45376,28 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -45504,7 +45541,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -46900,32 +46937,28 @@ func (r *ImagesService) List(project string) *ImagesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c @@ -47069,7 +47102,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -47642,32 +47675,28 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -47812,7 +47841,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -48603,32 +48632,28 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -48774,7 +48799,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -50907,32 +50932,28 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -51076,7 +51097,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -51664,32 +51685,28 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -51835,7 +51852,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -51928,32 +51945,28 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -52093,7 +52106,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -53194,32 +53207,28 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c @@ -53363,7 +53372,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -53785,32 +53794,28 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -53954,7 +53959,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -55334,32 +55339,28 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -55505,7 +55506,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -55599,32 +55600,28 @@ func (r *InstancesService) ListReferrers(project string, zone string, instance s return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { c.urlParams_.Set("filter", filter) return c @@ -55772,7 +55769,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -57858,6 +57855,154 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } +// method id "compute.instances.simulateMaintenanceEvent": + +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SimulateMaintenanceEvent: Simulates a maintenance event on the +// instance. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Simulates a maintenance event on the instance.", + // "httpMethod": "POST", + // "id": "compute.instances.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.start": type InstancesStartCall struct { @@ -58952,32 +59097,28 @@ func (r 
*InterconnectAttachmentsService) AggregatedList(project string) *Interco return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. 
You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -59122,7 +59263,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -59711,32 +59852,28 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. 
+// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -59882,7 +60019,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -60649,32 +60786,28 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c @@ -60818,7 +60951,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -61373,32 +61506,28 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c @@ -61542,7 +61671,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -62722,32 +62851,28 @@ func (r *LicensesService) List(project string) *LicensesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *LicensesListCall) Filter(filter string) *LicensesListCall { c.urlParams_.Set("filter", filter) return c @@ -62891,7 +63016,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -62974,32 +63099,28 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -63143,7 +63264,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -63393,32 +63514,28 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -63564,7 +63681,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -64300,32 +64417,28 @@ func (r *NetworksService) List(project string) *NetworksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -64469,7 +64582,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -67821,32 +67934,28 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -67992,7 +68101,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -69277,32 +69386,28 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -69448,7 +69553,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -70066,32 +70171,28 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -70235,7 +70336,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -70654,32 +70755,28 @@ func (r *RegionCommitmentsService) List(project string, region string) *RegionCo return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -70825,7 +70922,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -71080,32 +71177,28 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -71251,7 +71344,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -72042,32 +72135,28 @@ func (r *RegionDisksService) List(project string, region string) *RegionDisksLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { c.urlParams_.Set("filter", filter) return c @@ -72213,7 +72302,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -73723,32 +73812,28 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -73894,7 +73979,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -75812,32 +75897,28 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -75983,7 +76064,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -76079,32 +76160,28 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -76245,7 +76322,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -76968,32 +77045,28 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -77139,7 +77212,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -77383,32 +77456,28 @@ func (r *RegionsService) List(project string) *RegionsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c @@ -77552,7 +77621,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -77634,32 +77703,28 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -77803,7 +77868,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -78556,32 +78621,28 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c @@ -78727,7 +78788,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -79977,32 +80038,28 @@ func (r *RoutesService) List(project string) *RoutesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c @@ -80146,7 +80203,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -81159,32 +81216,28 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -81328,7 +81381,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -82359,32 +82412,28 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c @@ -82528,7 +82577,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -83378,32 +83427,28 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -83547,7 +83592,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -84250,32 +84295,28 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -84419,7 +84460,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -84502,32 +84543,28 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c @@ -84673,7 +84710,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -85052,32 +85089,28 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -85221,7 +85254,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -86156,32 +86189,28 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -86327,7 +86356,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -86416,32 +86445,28 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { c.urlParams_.Set("filter", filter) return c @@ -86585,7 +86610,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -87829,32 +87854,28 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -87998,7 +88019,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -88872,32 +88893,28 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -89041,7 +89058,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -89954,32 +89971,28 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -90123,7 +90136,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -90717,32 +90730,28 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -90888,7 +90897,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -91501,32 +91510,28 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -91670,7 +91675,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -92424,32 +92429,28 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c @@ -92595,7 +92596,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -93875,32 +93876,28 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -94044,7 +94041,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -95429,32 +95426,28 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -95598,7 +95591,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -96020,32 +96013,28 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -96189,7 +96178,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -96779,32 +96768,28 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -96950,7 +96935,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -98028,32 +98013,28 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -98197,7 +98178,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -98921,32 +98902,28 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -99090,7 +99067,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -99680,32 +99657,28 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { c.urlParams_.Set("filter", filter) return c @@ -99851,7 +99824,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -100568,32 +100541,28 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -100739,7 +100708,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -100983,32 +100952,28 @@ func (r *ZonesService) List(project string) *ZonesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ZonesListCall) Filter(filter string) *ZonesListCall { c.urlParams_.Set("filter", filter) return c @@ -101152,7 +101117,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index a188c6df4..afc487bd2 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -29,7 +29,7 @@ "description": "Creates and runs virtual machines on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/compute/docs/reference/latest/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/-YzN1b0_oAWbrk9rmdu60A0nf94\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/QdrSoC3sccj4V1AevKB02CZS13o\"", "icons": { "x16": "https://www.google.com/images/icons/product/compute_engine-16.png", "x32": "https://www.google.com/images/icons/product/compute_engine-32.png" @@ -97,7 +97,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -189,7 +189,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -249,7 +249,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -428,7 +428,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -488,7 +488,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -667,7 +667,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -926,7 +926,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1061,7 +1061,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1251,7 +1251,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1386,7 +1386,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1478,7 +1478,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1538,7 +1538,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -1774,7 +1774,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2037,7 +2037,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2172,7 +2172,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2351,7 +2351,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2565,7 +2565,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2723,7 +2723,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2817,7 +2817,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -2930,7 +2930,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3088,7 +3088,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3328,7 +3328,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3568,7 +3568,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -3891,7 +3891,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4027,7 +4027,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4248,7 +4248,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4606,7 +4606,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4780,7 +4780,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -4837,7 +4837,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5104,7 +5104,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5213,7 +5213,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5611,7 +5611,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -5669,7 +5669,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6540,7 +6540,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6719,7 +6719,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6813,7 +6813,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -6971,7 +6971,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7103,7 +7103,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7195,7 +7195,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -7401,7 +7401,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8111,7 +8111,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8439,7 +8439,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8597,7 +8597,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -8730,7 +8730,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9009,7 +9009,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9362,7 +9362,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9419,7 +9419,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9615,7 +9615,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9709,7 +9709,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9762,7 +9762,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -9983,7 +9983,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10291,7 +10291,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10416,7 +10416,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10610,7 +10610,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10663,7 +10663,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -10891,7 +10891,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11154,7 +11154,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11353,7 +11353,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11488,7 +11488,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11667,7 +11667,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -11825,7 +11825,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12049,7 +12049,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12367,7 +12367,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12648,7 +12648,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12783,7 +12783,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -12962,7 +12962,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13168,7 +13168,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13339,7 +13339,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13518,7 +13518,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13659,7 +13659,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13753,7 +13753,7 @@ ], "parameters": { "filter": { - "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", "location": "query", "type": "string" }, @@ -13796,7 +13796,7 @@ } } }, - "revision": "20180220", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -23818,6 +23818,7 @@ "NETWORKS", "NVIDIA_K80_GPUS", "NVIDIA_P100_GPUS", + "NVIDIA_V100_GPUS", "PREEMPTIBLE_CPUS", "PREEMPTIBLE_LOCAL_SSD_GB", "PREEMPTIBLE_NVIDIA_K80_GPUS", @@ -23889,6 +23890,7 @@ "", "", "", + "", "" ], "type": "string" diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index a4be4a414..712fd93b7 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -15285,6 +15285,7 @@ type Quota struct { // "NETWORKS" // "NVIDIA_K80_GPUS" // "NVIDIA_P100_GPUS" + // "NVIDIA_V100_GPUS" // "PREEMPTIBLE_CPUS" // "PREEMPTIBLE_LOCAL_SSD_GB" // "PREEMPTIBLE_NVIDIA_K80_GPUS" @@ -23695,32 +23696,28 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp return c } -// Filter sets the optional parameter "filter": Sets a 
filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. 
For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -23864,7 +23861,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -24112,32 +24109,28 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. 
The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. 
However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -24283,7 +24276,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. 
The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -24373,32 +24366,28 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). 
The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -24542,7 +24531,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -25135,32 +25124,28 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AddressesListCall) Filter(filter string) *AddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -25306,7 +25291,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -25395,32 +25380,28 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -25564,7 +25545,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -26154,32 +26135,28 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -26325,7 +26302,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -27256,32 +27233,28 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c @@ -27425,7 +27398,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -27851,32 +27824,28 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -28020,7 +27989,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -28729,32 +28698,28 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -28898,7 +28863,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -29331,32 +29296,28 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -29500,7 +29461,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -29750,32 +29711,28 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -29921,7 +29878,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -30011,32 +29968,28 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -30180,7 +30133,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -30983,32 +30936,28 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c @@ -31154,7 +31103,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -32083,32 +32032,28 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c @@ -32252,7 +32197,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -32682,32 +32627,28 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -32851,7 +32792,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -33444,32 +33385,28 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -33615,7 +33552,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -34363,32 +34300,28 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -34532,7 +34465,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -35091,32 +35024,28 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -35260,7 +35189,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -35515,32 +35444,28 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -35684,7 +35609,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -36030,32 +35955,28 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -36199,7 +36120,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -36754,32 +36675,28 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -36923,7 +36840,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -37825,32 +37742,28 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -37994,7 +37907,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -38894,32 +38807,28 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -39063,7 +38972,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -40311,32 +40220,28 @@ func (r *ImagesService) List(project string) *ImagesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c @@ -40480,7 +40385,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -40905,32 +40810,28 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -41075,7 +40976,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. 
However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -41866,32 +41767,28 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. 
Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -42037,7 +41934,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -43250,32 +43147,28 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -43419,7 +43312,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -44007,32 +43900,28 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -44178,7 +44067,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -44271,32 +44160,28 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -44436,7 +44321,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -45378,32 +45263,28 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c @@ -45547,7 +45428,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -45821,32 +45702,28 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -45990,7 +45867,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -47336,32 +47213,28 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -47507,7 +47380,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -47601,32 +47474,28 @@ func (r *InstancesService) ListReferrers(project string, zone string, instance s return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { c.urlParams_.Set("filter", filter) return c @@ -47774,7 +47643,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -50795,32 +50664,28 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -50965,7 +50830,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -51554,32 +51419,28 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -51725,7 +51586,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -51968,32 +51829,28 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c @@ -52137,7 +51994,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -52692,32 +52549,28 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c @@ -52861,7 +52714,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -53268,32 +53121,28 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -53437,7 +53286,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -53687,32 +53536,28 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -53858,7 +53703,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -54594,32 +54439,28 @@ func (r *NetworksService) List(project string) *NetworksListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -54763,7 +54604,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -57805,32 +57646,28 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -57976,7 +57813,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -59102,32 +58939,28 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -59273,7 +59106,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -59732,32 +59565,28 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -59901,7 +59730,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -60320,32 +60149,28 @@ func (r *RegionCommitmentsService) List(project string, region string) *RegionCo return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -60491,7 +60316,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -61480,32 +61305,28 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -61651,7 +61472,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -62841,32 +62662,28 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -63012,7 +62829,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -63108,32 +62925,28 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. 
The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -63274,7 +63087,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -63838,32 +63651,28 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -64009,7 +63818,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -64253,32 +64062,28 @@ func (r *RegionsService) List(project string) *RegionsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c @@ -64422,7 +64227,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -64504,32 +64309,28 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -64673,7 +64474,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -65426,32 +65227,28 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c @@ -65597,7 +65394,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -66688,32 +66485,28 @@ func (r *RoutesService) List(project string) *RoutesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c @@ -66857,7 +66650,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -67261,32 +67054,28 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c @@ -67430,7 +67219,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -68132,32 +67921,28 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -68301,7 +68086,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -68383,32 +68168,28 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -68552,7 +68333,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -69324,32 +69105,28 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -69495,7 +69272,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -70429,32 +70206,28 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -70598,7 +70371,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -71324,32 +71097,28 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -71493,7 +71262,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -71916,32 +71685,28 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -72085,7 +71850,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -72679,32 +72444,28 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -72850,7 +72611,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -73304,32 +73065,28 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -73473,7 +73230,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -74227,32 +73984,28 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c @@ -74398,7 +74151,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -75519,32 +75272,28 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -75688,7 +75437,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -76753,32 +76502,28 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -76922,7 +76667,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -77344,32 +77089,28 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -77513,7 +77254,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -78103,32 +77844,28 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). 
func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -78274,7 +78011,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -79011,32 +78748,28 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). 
For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -79180,7 +78913,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -79756,32 +79489,28 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. 
// -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -79925,7 +79654,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. 
For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -80515,32 +80244,28 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. 
For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { c.urlParams_.Set("filter", filter) return c @@ -80686,7 +80411,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -81062,32 +80787,28 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. 
Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. // -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). 
Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -81233,7 +80954,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, @@ -81477,32 +81198,28 @@ func (r *ZonesService) List(project string) *ZonesListCall { return c } -// Filter sets the optional parameter "filter": Sets a filter -// {expression} for filtering listed resources. Your {expression} must -// be in the format: field_name comparison_string literal_string. +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. The expression must specify +// the field name, a comparison operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The comparison operator must be either =, !=, >, or <. 
// -// The field_name is the name of the field you want to compare. Only -// atomic field types are supported (string, number, boolean). The -// comparison_string must be either eq (equals) or ne (not equals). The -// literal_string is the string value to filter to. The literal value -// must be valid for the type of field you are filtering by (string, -// number, boolean). For string fields, the literal value is interpreted -// as a regular expression using RE2 syntax. The literal value must -// match the entire field. +// For example, if you are filtering Compute Engine instances, you can +// exclude instances named example-instance by specifying name != +// example-instance. // -// For example, to filter for instances that do not have a name of -// example-instance, you would use name ne example-instance. -// -// You can filter on nested fields. For example, you could filter on -// instances that have set the scheduling.automaticRestart field to -// true. Use filtering on nested fields to take advantage of labels to -// organize and search for results based on label values. +// You can also filter nested fields. For example, you could specify +// scheduling.automaticRestart = false to include instances only if they +// are not scheduled for automatic restarts. You can use filtering on +// nested fields to filter based on resource labels. // // To filter on multiple expressions, provide each separate expression -// within parentheses. For example, (scheduling.automaticRestart eq -// true) (zone eq us-central1-f). Multiple expressions are treated as -// AND expressions, meaning that resources must match all expressions to -// pass the filters. +// within parentheses. For example, (scheduling.automaticRestart = true) +// (cpuPlatform = "Intel Skylake"). By default, each expression is an +// AND expression. However, you can include AND and OR expressions +// explicitly. 
For example, (cpuPlatform = "Intel Skylake") OR +// (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = +// true). func (c *ZonesListCall) Filter(filter string) *ZonesListCall { c.urlParams_.Set("filter", filter) return c @@ -81646,7 +81363,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter {expression} for filtering listed resources. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "A filter expression that filters resources listed in the response. The expression must specify the field name, a comparison operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The comparison operator must be either =, !=, \u003e, or \u003c.\n\nFor example, if you are filtering Compute Engine instances, you can exclude instances named example-instance by specifying name != example-instance.\n\nYou can also filter nested fields. For example, you could specify scheduling.automaticRestart = false to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\"). By default, each expression is an AND expression. However, you can include AND and OR expressions explicitly. For example, (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true).", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index c0525bdd5..9adcb6cd1 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -117,6 +117,969 @@ "resources": { "projects": { "resources": { + "locations": { + "methods": { + "getServerConfig": { + "description": "Returns configuration info about the Kubernetes Engine service.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/serverConfig", + "httpMethod": "GET", + "id": "container.projects.locations.getServerConfig", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) to return operations for.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}/serverConfig", + "response": { + "$ref": "ServerConfig" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "clusters": { + "methods": { + "completeIpRotation": { + "description": "Completes master IP rotation.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:completeIpRotation", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.completeIpRotation", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster id) of the cluster to complete IP rotation.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:completeIpRotation", + "request": { + "$ref": "CompleteIPRotationRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. 
After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format 'projects/*/locations/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/clusters", + "request": { + "$ref": "CreateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", + "httpMethod": "DELETE", + "id": "container.projects.locations.clusters.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "clusterId": { + "description": "Deprecated. 
The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the details of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "clusterId": { + "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Cluster" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/clusters", + "response": { + "$ref": "ListClustersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setAddons": { + "description": "Sets the addons of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setAddons", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setAddons", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setAddons", + "request": { + "$ref": "SetAddonsConfigRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setLegacyAbac": { + "description": "Enables or disables the ABAC authorization mechanism on a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLegacyAbac", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setLegacyAbac", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setLegacyAbac", + "request": { + "$ref": "SetLegacyAbacRequest" + }, + "response": { + "$ref": 
"Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setLocations": { + "description": "Sets the locations of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setLocations", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setLocations", + "request": { + "$ref": "SetLocationsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setLogging": { + "description": "Sets the logging service of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLogging", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setLogging", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setLogging", + "request": { + "$ref": "SetLoggingServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setMaintenancePolicy": { + "description": "Sets the maintenance policy for a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMaintenancePolicy", + "httpMethod": "POST", + "id": 
"container.projects.locations.clusters.setMaintenancePolicy", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setMaintenancePolicy", + "request": { + "$ref": "SetMaintenancePolicyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setMasterAuth": { + "description": "Used to set master auth materials. Currently supports :-\nChanging the admin password of a specific cluster.\nThis can be either via password generation or explicitly set the password.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setMasterAuth", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setMasterAuth", + "request": { + "$ref": "SetMasterAuthRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setMonitoring": { + "description": "Sets the monitoring service of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMonitoring", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setMonitoring", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name 
(project, location, cluster) of the cluster to set monitoring.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setMonitoring", + "request": { + "$ref": "SetMonitoringServiceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setNetworkPolicy": { + "description": "Enables/Disables Network Policy for a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setNetworkPolicy", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setNetworkPolicy", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster id) of the cluster to set networking policy.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setNetworkPolicy", + "request": { + "$ref": "SetNetworkPolicyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setResourceLabels": { + "description": "Sets labels on a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setResourceLabels", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.setResourceLabels", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": 
"v1/{+name}:setResourceLabels", + "request": { + "$ref": "SetLabelsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "startIpRotation": { + "description": "Start master IP rotation.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:startIpRotation", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.startIpRotation", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster id) of the cluster to start IP rotation.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:startIpRotation", + "request": { + "$ref": "StartIPRotationRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "description": "Updates the settings of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", + "httpMethod": "PUT", + "id": "container.projects.locations.clusters.update", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "request": { + "$ref": "UpdateClusterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "updateMaster": { + "description": "Updates the master of a specific cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:updateMaster", 
+ "httpMethod": "POST", + "id": "container.projects.locations.clusters.updateMaster", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:updateMaster", + "request": { + "$ref": "UpdateMasterRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "nodePools": { + "methods": { + "create": { + "description": "Creates a node pool for a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.nodePools.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent (project, location, cluster id) where the node pool will be created.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+parent}/nodePools", + "request": { + "$ref": "CreateNodePoolRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a node pool from a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", + "httpMethod": "DELETE", + "id": "container.projects.locations.clusters.nodePools.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "clusterId": { + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" + }, + "nodePoolId": { + "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the node pool requested.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.nodePools.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "clusterId": { + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to get.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" + }, + "nodePoolId": { + "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "NodePool" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists the node pools for a cluster.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools", + "httpMethod": "GET", + "id": "container.projects.locations.clusters.nodePools.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "clusterId": { + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent (project, location, cluster id) where the node pools will be listed.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/nodePools", + "response": { + "$ref": "ListNodePoolsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "rollback": { + "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.nodePools.rollback", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:rollback", + "request": { + 
"$ref": "RollbackNodePoolUpgradeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setAutoscaling": { + "description": "Sets the autoscaling settings of a specific node pool.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setAutoscaling", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.nodePools.setAutoscaling", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setAutoscaling", + "request": { + "$ref": "SetNodePoolAutoscalingRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setManagement": { + "description": "Sets the NodeManagement options for a node pool.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setManagement", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.nodePools.setManagement", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. 
Specified in the format\n'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setManagement", + "request": { + "$ref": "SetNodePoolManagementRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "setSize": { + "description": "Sets the size of a specific node pool.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setSize", + "httpMethod": "POST", + "id": "container.projects.locations.clusters.nodePools.setSize", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:setSize", + "request": { + "$ref": "SetNodePoolSizeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "update": { + "description": "Updates the version and/or image type of a specific node pool.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", + "httpMethod": "PUT", + "id": "container.projects.locations.clusters.nodePools.update", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, cluster, node pool) of the node pool to update.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + "required": true, + "type": "string" 
+ } + }, + "path": "v1/{+name}", + "request": { + "$ref": "UpdateNodePoolRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Cancels the specified operation.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + "httpMethod": "POST", + "id": "container.projects.locations.operations.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}:cancel", + "request": { + "$ref": "CancelOperationRequest" + }, + "response": { + "$ref": "Empty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the specified operation.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "container.projects.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + }, + "operationId": { + "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists all operations in a project in a specific zone or all zones.", + "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + "httpMethod": "GET", + "id": "container.projects.locations.operations.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.\nThis field has been deprecated and replaced by the parent field.", + "location": "query", + "type": "string" + } + }, + "path": "v1/{+parent}/operations", + "response": { + "$ref": "ListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, "zones": { "methods": { "getServerconfig": { @@ -129,14 +1092,19 @@ "zone" ], "parameters": { + "name": { + "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + "location": "query", + "type": "string" + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) to return operations for.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -166,19 +1134,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -207,19 +1175,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -247,13 +1215,13 @@ ], "parameters": { "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -282,19 +1250,24 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to delete.", + "description": "Deprecated. The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, + "name": { + "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "query", + "type": "string" + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -320,19 +1293,24 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to retrieve.", + "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, + "name": { + "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "query", + "type": "string" + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -358,19 +1336,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to update.", + "description": "Deprecated. 
The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -397,14 +1375,19 @@ "zone" ], "parameters": { + "parent": { + "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + "location": "query", + "type": "string" + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.", + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -430,19 +1413,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -471,19 +1454,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -512,19 +1495,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -553,19 +1536,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. 
The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -594,19 +1577,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -676,19 +1659,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -717,19 +1700,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -758,19 +1741,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -799,19 +1782,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. 
The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -845,25 +1828,25 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "The name of the node pool to upgrade.", + "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -892,19 +1875,19 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -934,25 +1917,30 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "query", + "type": "string" + }, "nodePoolId": { - "description": "The name of the node pool to delete.", + "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -979,25 +1967,30 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to get.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "location": "query", + "type": "string" + }, "nodePoolId": { - "description": "The name of the node pool.", + "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1023,19 +2016,24 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster.", + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, + "parent": { + "description": "The parent (project, location, cluster id) where the node pools will be listed.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "location": "query", + "type": "string" + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -1062,25 +2060,25 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to rollback.", + "description": "Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "The name of the node pool to rollback.", + "description": "Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1110,25 +2108,25 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to update.", + "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "The name of the node pool to update.", + "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1158,25 +2156,25 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to update.", + "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "The name of the node pool to update.", + "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1206,25 +2204,25 @@ ], "parameters": { "clusterId": { - "description": "The name of the cluster to upgrade.", + "description": "Deprecated. 
The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "nodePoolId": { - "description": "The name of the node pool to upgrade.", + "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1259,19 +2257,19 @@ ], "parameters": { "operationId": { - "description": "The server-assigned `name` of the operation.", + "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.", + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1299,20 +2297,25 @@ "operationId" ], "parameters": { + "name": { + "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + "location": "query", + "type": "string" + }, "operationId": { - "description": "The server-assigned `name` of the operation.", + "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "location": "path", "required": true, "type": "string" @@ -1336,14 +2339,19 @@ "zone" ], "parameters": { + "parent": { + "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + "location": "query", + "type": "string" + }, "projectId": { - "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.", + "description": "Deprecated. The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.\nThis field has been deprecated and replaced by the parent field.", "location": "path", "required": true, "type": "string" @@ -1364,7 +2372,7 @@ } } }, - "revision": "20180223", + "revision": "20180308", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -1424,7 +2432,24 @@ "CancelOperationRequest": { "description": "CancelOperationRequest cancels a single operation.", "id": "CancelOperationRequest", - "properties": {}, + "properties": { + "name": { + "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + "type": "string" + }, + "operationId": { + "description": "Deprecated. 
The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + } + }, "type": "object" }, "CidrBlock": { @@ -1499,7 +2524,7 @@ "type": "string" }, "initialClusterVersion": { - "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.", + "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"\",\"-\": picks the default Kubernetes version", "type": "string" }, "initialNodeCount": { @@ -1526,6 +2551,10 @@ "$ref": "LegacyAbac", "description": "Configuration for the legacy ABAC authorization mode." 
}, + "location": { + "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/regions-zones/regions-zones#available) or\n[region](/compute/docs/regions-zones/regions-zones#available) in which\nthe cluster resides.", + "type": "string" + }, "locations": { "description": "The list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located.", "items": { @@ -1627,7 +2656,7 @@ "type": "string" }, "zone": { - "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field is deprecated, use location instead.", "type": "string" } }, @@ -1657,7 +2686,7 @@ "description": "Master authorized networks is a Beta feature.\nThe desired configuration options for master authorized networks feature." }, "desiredMasterVersion": { - "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version. Use \"-\" to have the server automatically select\nthe latest version.", + "description": "The Kubernetes version to change the master to.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", "type": "string" }, "desiredMonitoringService": { @@ -1673,7 +2702,7 @@ "type": "string" }, "desiredNodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). 
Use `-` to upgrade to the latest version supported by\nthe server.", + "description": "The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", "type": "string" } }, @@ -1682,7 +2711,24 @@ "CompleteIPRotationRequest": { "description": "CompleteIPRotationRequest moves the cluster master back into single-IP mode.", "id": "CompleteIPRotationRequest", - "properties": {}, + "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster id) of the cluster to complete IP rotation.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + } + }, "type": "object" }, "CreateClusterRequest": { @@ -1692,6 +2738,18 @@ "cluster": { "$ref": "Cluster", "description": "A [cluster\nresource](/container-engine/reference/rest/v1/projects.zones.clusters)" + }, + "parent": { + "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format 'projects/*/locations/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", + "type": "string" } }, "type": "object" @@ -1700,9 +2758,25 @@ "description": "CreateNodePoolRequest creates a node pool for a cluster.", "id": "CreateNodePoolRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + "type": "string" + }, "nodePool": { "$ref": "NodePool", "description": "The node pool to create." + }, + "parent": { + "description": "The parent (project, location, cluster id) where the node pool will be created.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", + "type": "string" } }, "type": "object" @@ -2177,6 +3251,10 @@ "description": "[Output only] The time the operation completed, in\n[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", "type": "string" }, + "location": { + "description": "[Output only] The name of the Google Compute Engine\n[zone](/compute/docs/regions-zones/regions-zones#available) or\n[region](/compute/docs/regions-zones/regions-zones#available) in which\nthe cluster resides.", + "type": "string" + }, "name": { "description": "The server-assigned ID for the operation.", "type": "string" @@ -2258,7 +3336,7 @@ "type": "string" }, "zone": { - "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation\nis taking place.", + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation\nis taking place.\nThis field is deprecated, use location instead.", "type": "string" } }, @@ -2267,7 +3345,28 @@ "RollbackNodePoolUpgradeRequest": { "description": "RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed\nNodePool upgrade. This will be an no-op if the last upgrade successfully\ncompleted.", "id": "RollbackNodePoolUpgradeRequest", - "properties": {}, + "properties": { + "clusterId": { + "description": "Deprecated. 
The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "type": "string" + }, + "nodePoolId": { + "description": "Deprecated. The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + } + }, "type": "object" }, "ServerConfig": { @@ -2313,6 +3412,22 @@ "addonsConfig": { "$ref": "AddonsConfig", "description": "The desired configurations for the various addons available to run in the\ncluster." + }, + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2321,16 +3436,32 @@ "description": "SetLabelsRequest sets the Google Cloud Platform labels on a Google Container\nEngine cluster, which will in turn set them for Google Compute Engine\nresources used by that cluster", "id": "SetLabelsRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "labelFingerprint": { "description": "The fingerprint of the previous set of labels for this resource,\nused to detect conflicts. The fingerprint is initially generated by\nKubernetes Engine and changes after every request to modify or update\nlabels. You must always provide an up-to-date fingerprint hash when\nupdating or changing labels. Make a \u003ccode\u003eget()\u003c/code\u003e request to the\nresource to get the latest fingerprint.", "type": "string" }, + "name": { + "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "resourceLabels": { "additionalProperties": { "type": "string" }, "description": "The labels to set for that cluster.", "type": "object" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2339,9 +3470,25 @@ "description": "SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for\na cluster.", "id": "SetLegacyAbacRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "enabled": { "description": "Whether ABAC authorization will be enabled in the cluster.", "type": "boolean" + }, + "name": { + "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2350,12 +3497,28 @@ "description": "SetLocationsRequest sets the locations of the cluster.", "id": "SetLocationsRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "locations": { "description": "The desired list of Google Compute Engine\n[locations](/compute/docs/zones#available) in which the cluster's nodes\nshould be located. 
Changing the locations a cluster is in will result\nin nodes being either created or removed from the cluster, depending on\nwhether locations are being added or removed.\n\nThis list must always include the cluster's primary zone.", "items": { "type": "string" }, "type": "array" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2364,9 +3527,25 @@ "description": "SetLoggingServiceRequest sets the logging service of a cluster.", "id": "SetLoggingServiceRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "loggingService": { "description": "The logging service the cluster should use to write metrics.\nCurrently available options:\n\n* \"logging.googleapis.com\" - the Google Cloud Logging service\n* \"none\" - no metrics will be exported from the cluster", "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2375,9 +3554,25 @@ "description": "SetMaintenancePolicyRequest sets the maintenance policy for a cluster.", "id": "SetMaintenancePolicyRequest", "properties": { + "clusterId": { + "description": "The name of the cluster to update.", + "type": "string" + }, "maintenancePolicy": { "$ref": "MaintenancePolicy", "description": "The maintenance policy to be set for the cluster. An empty field\nclears the existing maintenance policy." + }, + "name": { + "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + "type": "string" + }, + "zone": { + "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + "type": "string" } }, "type": "object" @@ -2402,9 +3597,25 @@ ], "type": "string" }, + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "update": { "$ref": "MasterAuth", "description": "A description of the update." + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2413,9 +3624,25 @@ "description": "SetMonitoringServiceRequest sets the monitoring service of a cluster.", "id": "SetMonitoringServiceRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "monitoringService": { "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* \"monitoring.googleapis.com\" - the Google Cloud Monitoring service\n* \"none\" - no metrics will be exported from the cluster", "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2424,9 +3651,25 @@ "description": "SetNetworkPolicyRequest enables/disables network policy for a cluster.", "id": "SetNetworkPolicyRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster id) of the cluster to set networking policy.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, "networkPolicy": { "$ref": "NetworkPolicy", "description": "Configuration options for the NetworkPolicy feature." + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2438,6 +3681,26 @@ "autoscaling": { "$ref": "NodePoolAutoscaling", "description": "Autoscaling configuration for the node pool." + }, + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. Specified in the format\n'projects/*/locations/*/clusters/*/nodePools/*'.", + "type": "string" + }, + "nodePoolId": { + "description": "Deprecated. 
The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2446,9 +3709,29 @@ "description": "SetNodePoolManagementRequest sets the node management properties of a node\npool.", "id": "SetNodePoolManagementRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "management": { "$ref": "NodeManagement", "description": "NodeManagement configuration for the node pool." + }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n'projects/*/locations/*/clusters/*/nodePools/*'.", + "type": "string" + }, + "nodePoolId": { + "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2457,10 +3740,30 @@ "description": "SetNodePoolSizeRequest sets the size a node\npool.", "id": "SetNodePoolSizeRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "type": "string" + }, "nodeCount": { "description": "The desired node count for the pool.", "format": "int32", "type": "integer" + }, + "nodePoolId": { + "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2468,16 +3771,49 @@ "StartIPRotationRequest": { "description": "StartIPRotationRequest creates a new IP for the cluster and then performs\na node upgrade on each node pool to point to the new IP.", "id": "StartIPRotationRequest", - "properties": {}, + "properties": { + "clusterId": { + "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster id) of the cluster to start IP rotation.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + } + }, "type": "object" }, "UpdateClusterRequest": { "description": "UpdateClusterRequest updates the settings of a cluster.", "id": "UpdateClusterRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "update": { "$ref": "ClusterUpdate", "description": "A description of the update." + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + "type": "string" } }, "type": "object" @@ -2486,8 +3822,24 @@ "description": "UpdateMasterRequest updates the master of the cluster.", "id": "UpdateMasterRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "masterVersion": { - "description": "The Kubernetes version to change the master to. Use \"-\" to have the server\nautomatically select the default version.", + "description": "The Kubernetes version to change the master to.\n\nUsers may specify either explicit versions offered by Kubernetes Engine or\nversion aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", + "type": "string" + }, + "name": { + "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + "type": "string" + }, + "projectId": { + "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "type": "string" } }, @@ -2497,12 +3849,32 @@ "description": "UpdateNodePoolRequests update a node pool's image and/or version.", "id": "UpdateNodePoolRequest", "properties": { + "clusterId": { + "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "imageType": { "description": "The desired image type for the node pool.", "type": "string" }, + "name": { + "description": "The name (project, location, cluster, node pool) of the node pool to update.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + "type": "string" + }, + "nodePoolId": { + "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, "nodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", + "description": "The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by Kubernetes Engine or\nversion aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", + "type": "string" + }, + "projectId": { + "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + "type": "string" + }, + "zone": { + "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", "type": "string" } }, diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index f877c6ac2..f1f6912f2 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -77,6 +77,7 @@ func (s *Service) userAgent() string { func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} + rs.Locations = NewProjectsLocationsService(s) rs.Zones = NewProjectsZonesService(s) return rs } @@ -84,9 +85,56 @@ func NewProjectsService(s *Service) *ProjectsService { type ProjectsService struct { s *Service + Locations *ProjectsLocationsService + Zones *ProjectsZonesService } +func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { + rs := &ProjectsLocationsService{s: s} + rs.Clusters = NewProjectsLocationsClustersService(s) + rs.Operations = NewProjectsLocationsOperationsService(s) + return rs +} + +type ProjectsLocationsService struct { + s *Service + + Clusters *ProjectsLocationsClustersService + + Operations *ProjectsLocationsOperationsService +} + +func NewProjectsLocationsClustersService(s *Service) *ProjectsLocationsClustersService { + rs := &ProjectsLocationsClustersService{s: s} + rs.NodePools = NewProjectsLocationsClustersNodePoolsService(s) + return rs +} + +type ProjectsLocationsClustersService struct { + s *Service + + NodePools *ProjectsLocationsClustersNodePoolsService +} + +func NewProjectsLocationsClustersNodePoolsService(s *Service) 
*ProjectsLocationsClustersNodePoolsService { + rs := &ProjectsLocationsClustersNodePoolsService{s: s} + return rs +} + +type ProjectsLocationsClustersNodePoolsService struct { + s *Service +} + +func NewProjectsLocationsOperationsService(s *Service) *ProjectsLocationsOperationsService { + rs := &ProjectsLocationsOperationsService{s: s} + return rs +} + +type ProjectsLocationsOperationsService struct { + s *Service +} + func NewProjectsZonesService(s *Service) *ProjectsZonesService { rs := &ProjectsZonesService{s: s} rs.Clusters = NewProjectsZonesClustersService(s) @@ -264,6 +312,51 @@ func (s *AutoUpgradeOptions) MarshalJSON() ([]byte, error) { // CancelOperationRequest: CancelOperationRequest cancels a single // operation. type CancelOperationRequest struct { + // Name: The name (project, location, operation id) of the operation to + // cancel. + // Specified in the format 'projects/*/locations/*/operations/*'. + Name string `json:"name,omitempty"` + + // OperationId: Deprecated. The server-assigned `name` of the + // operation. + // This field has been deprecated and replaced by the name field. + OperationId string `json:"operationId,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the operation + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CancelOperationRequest) MarshalJSON() ([]byte, error) { + type NoMethod CancelOperationRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CidrBlock: CidrBlock contains an optional name and one CIDR block. @@ -403,6 +496,17 @@ type Cluster struct { // be upgraded over time; such upgrades are reflected // in // currentMasterVersion and currentNodeVersion. + // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "","-": picks the default Kubernetes version InitialClusterVersion string `json:"initialClusterVersion,omitempty"` // InitialNodeCount: The number of nodes to create in this cluster. You @@ -434,6 +538,15 @@ type Cluster struct { // LegacyAbac: Configuration for the legacy ABAC authorization mode. 
LegacyAbac *LegacyAbac `json:"legacyAbac,omitempty"` + // Location: [Output only] The name of the Google Compute + // Engine + // [zone](/compute/docs/regions-zones/regions-zones#available) + // or + // [region](/compute/docs/regions-zones/regions-zones#available) in + // which + // the cluster resides. + Location string `json:"location,omitempty"` + // Locations: The list of Google Compute // Engine // [locations](/compute/docs/zones#available) in which the cluster's @@ -579,8 +692,10 @@ type Cluster struct { // Zone: [Output only] The name of the Google Compute // Engine - // [zone](/compute/docs/zones#available) in which the cluster + // [zone](/compute/docs/zones#available) in which the + // cluster // resides. + // This field is deprecated, use location instead. Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -643,11 +758,19 @@ type ClusterUpdate struct { // feature. DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"desiredMasterAuthorizedNetworksConfig,omitempty"` - // DesiredMasterVersion: The Kubernetes version to change the master to. - // The only valid value is the - // latest supported version. Use "-" to have the server automatically - // select - // the latest version. + // DesiredMasterVersion: The Kubernetes version to change the master + // to. 
+ // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the default Kubernetes version DesiredMasterVersion string `json:"desiredMasterVersion,omitempty"` // DesiredMonitoringService: The monitoring service the cluster should @@ -676,8 +799,18 @@ type ClusterUpdate struct { // DesiredNodeVersion: The Kubernetes version to change the nodes to // (typically an - // upgrade). Use `-` to upgrade to the latest version supported by - // the server. + // upgrade). + // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the Kubernetes master version DesiredNodeVersion string `json:"desiredNodeVersion,omitempty"` // ForceSendFields is a list of field names (e.g. "DesiredAddonsConfig") @@ -707,6 +840,51 @@ func (s *ClusterUpdate) MarshalJSON() ([]byte, error) { // CompleteIPRotationRequest: CompleteIPRotationRequest moves the // cluster master back into single-IP mode. type CompleteIPRotationRequest struct { + // ClusterId: Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster id) of the cluster to + // complete IP rotation. 
+ // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://developers.google.com/console/help/new/#projec + // tnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CompleteIPRotationRequest) MarshalJSON() ([]byte, error) { + type NoMethod CompleteIPRotationRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CreateClusterRequest: CreateClusterRequest creates a cluster. 
@@ -717,6 +895,26 @@ type CreateClusterRequest struct { // clusters) Cluster *Cluster `json:"cluster,omitempty"` + // Parent: The parent (project and location) where the cluster will be + // created. + // Specified in the format 'projects/*/locations/*'. + Parent string `json:"parent,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the parent field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the parent field. + Zone string `json:"zone,omitempty"` + // ForceSendFields is a list of field names (e.g. "Cluster") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -743,10 +941,35 @@ func (s *CreateClusterRequest) MarshalJSON() ([]byte, error) { // CreateNodePoolRequest: CreateNodePoolRequest creates a node pool for // a cluster. type CreateNodePoolRequest struct { + // ClusterId: Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the parent field. + ClusterId string `json:"clusterId,omitempty"` + // NodePool: The node pool to create. NodePool *NodePool `json:"nodePool,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodePool") to + // Parent: The parent (project, location, cluster id) where the node + // pool will be created. + // Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Parent string `json:"parent,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://developers.google.com/console/help/new/#projec + // tnumber). 
+ // This field has been deprecated and replaced by the parent field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the parent field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -754,7 +977,7 @@ type CreateNodePoolRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodePool") to include in + // NullFields is a list of field names (e.g. "ClusterId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -1834,6 +2057,15 @@ type Operation struct { // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format. EndTime string `json:"endTime,omitempty"` + // Location: [Output only] The name of the Google Compute + // Engine + // [zone](/compute/docs/regions-zones/regions-zones#available) + // or + // [region](/compute/docs/regions-zones/regions-zones#available) in + // which + // the cluster resides. + Location string `json:"location,omitempty"` + // Name: The server-assigned ID for the operation. Name string `json:"name,omitempty"` @@ -1888,6 +2120,7 @@ type Operation struct { // Engine // [zone](/compute/docs/zones#available) in which the operation // is taking place. + // This field is deprecated, use location instead. 
Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1923,6 +2156,57 @@ func (s *Operation) MarshalJSON() ([]byte, error) { // successfully // completed. type RollbackNodePoolUpgradeRequest struct { + // ClusterId: Deprecated. The name of the cluster to rollback. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster, node pool id) of the node + // poll to + // rollback upgrade. + // Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Name string `json:"name,omitempty"` + + // NodePoolId: Deprecated. The name of the node pool to rollback. + // This field has been deprecated and replaced by the name field. + NodePoolId string `json:"nodePoolId,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RollbackNodePoolUpgradeRequest) MarshalJSON() ([]byte, error) { + type NoMethod RollbackNodePoolUpgradeRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ServerConfig: Kubernetes Engine service configuration. @@ -1980,6 +2264,30 @@ type SetAddonsConfigRequest struct { // cluster. AddonsConfig *AddonsConfig `json:"addonsConfig,omitempty"` + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster) of the cluster to set + // addons. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + // ForceSendFields is a list of field names (e.g. "AddonsConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, @@ -2009,6 +2317,10 @@ func (s *SetAddonsConfigRequest) MarshalJSON() ([]byte, error) { // Engine // resources used by that cluster type SetLabelsRequest struct { + // ClusterId: Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // LabelFingerprint: The fingerprint of the previous set of labels for // this resource, // used to detect conflicts. The fingerprint is initially generated @@ -2022,10 +2334,30 @@ type SetLabelsRequest struct { // resource to get the latest fingerprint. LabelFingerprint string `json:"labelFingerprint,omitempty"` + // Name: The name (project, location, cluster id) of the cluster to set + // labels. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://developers.google.com/console/help/new/#projec + // tnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + // ResourceLabels: The labels to set for that cluster. ResourceLabels map[string]string `json:"resourceLabels,omitempty"` - // ForceSendFields is a list of field names (e.g. "LabelFingerprint") to + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2033,13 +2365,12 @@ type SetLabelsRequest struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LabelFingerprint") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -2053,10 +2384,34 @@ func (s *SetLabelsRequest) MarshalJSON() ([]byte, error) { // ABAC authorization mechanism for // a cluster. type SetLegacyAbacRequest struct { + // ClusterId: Deprecated. The name of the cluster to update. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // Enabled: Whether ABAC authorization will be enabled in the cluster. Enabled bool `json:"enabled,omitempty"` - // ForceSendFields is a list of field names (e.g. "Enabled") to + // Name: The name (project, location, cluster id) of the cluster to set + // legacy abac. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. 
The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2064,7 +2419,7 @@ type SetLegacyAbacRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Enabled") to include in + // NullFields is a list of field names (e.g. "ClusterId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2082,6 +2437,10 @@ func (s *SetLegacyAbacRequest) MarshalJSON() ([]byte, error) { // SetLocationsRequest: SetLocationsRequest sets the locations of the // cluster. type SetLocationsRequest struct { + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // Locations: The desired list of Google Compute // Engine // [locations](/compute/docs/zones#available) in which the cluster's @@ -2095,7 +2454,27 @@ type SetLocationsRequest struct { // This list must always include the cluster's primary zone. Locations []string `json:"locations,omitempty"` - // ForceSendFields is a list of field names (e.g. "Locations") to + // Name: The name (project, location, cluster) of the cluster to set + // locations. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. 
The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2103,7 +2482,7 @@ type SetLocationsRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Locations") to include in + // NullFields is a list of field names (e.g. "ClusterId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2121,6 +2500,10 @@ func (s *SetLocationsRequest) MarshalJSON() ([]byte, error) { // SetLoggingServiceRequest: SetLoggingServiceRequest sets the logging // service of a cluster. type SetLoggingServiceRequest struct { + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // LoggingService: The logging service the cluster should use to write // metrics. 
// Currently available options: @@ -2129,7 +2512,27 @@ type SetLoggingServiceRequest struct { // * "none" - no metrics will be exported from the cluster LoggingService string `json:"loggingService,omitempty"` - // ForceSendFields is a list of field names (e.g. "LoggingService") to + // Name: The name (project, location, cluster) of the cluster to set + // logging. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2137,13 +2540,12 @@ type SetLoggingServiceRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "LoggingService") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -2156,26 +2558,45 @@ func (s *SetLoggingServiceRequest) MarshalJSON() ([]byte, error) { // SetMaintenancePolicyRequest: SetMaintenancePolicyRequest sets the // maintenance policy for a cluster. type SetMaintenancePolicyRequest struct { + // ClusterId: The name of the cluster to update. + ClusterId string `json:"clusterId,omitempty"` + // MaintenancePolicy: The maintenance policy to be set for the cluster. // An empty field // clears the existing maintenance policy. MaintenancePolicy *MaintenancePolicy `json:"maintenancePolicy,omitempty"` - // ForceSendFields is a list of field names (e.g. "MaintenancePolicy") - // to unconditionally include in API requests. By default, fields with + // Name: The name (project, location, cluster id) of the cluster to set + // maintenance + // policy. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + ProjectId string `json:"projectId,omitempty"` + + // Zone: The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the cluster + // resides. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MaintenancePolicy") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -2203,9 +2624,33 @@ type SetMasterAuthRequest struct { // one. Action string `json:"action,omitempty"` + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster) of the cluster to set + // auth. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + // Update: A description of the update. Update *MasterAuth `json:"update,omitempty"` + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. 
+ // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + // ForceSendFields is a list of field names (e.g. "Action") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -2232,6 +2677,10 @@ func (s *SetMasterAuthRequest) MarshalJSON() ([]byte, error) { // SetMonitoringServiceRequest: SetMonitoringServiceRequest sets the // monitoring service of a cluster. type SetMonitoringServiceRequest struct { + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // MonitoringService: The monitoring service the cluster should use to // write metrics. // Currently available options: @@ -2240,21 +2689,40 @@ type SetMonitoringServiceRequest struct { // * "none" - no metrics will be exported from the cluster MonitoringService string `json:"monitoringService,omitempty"` - // ForceSendFields is a list of field names (e.g. "MonitoringService") - // to unconditionally include in API requests. By default, fields with + // Name: The name (project, location, cluster) of the cluster to set + // monitoring. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"ClusterId") to + // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MonitoringService") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } @@ -2267,10 +2735,34 @@ func (s *SetMonitoringServiceRequest) MarshalJSON() ([]byte, error) { // SetNetworkPolicyRequest: SetNetworkPolicyRequest enables/disables // network policy for a cluster. type SetNetworkPolicyRequest struct { + // ClusterId: Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster id) of the cluster to set + // networking policy. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + // NetworkPolicy: Configuration options for the NetworkPolicy feature. 
NetworkPolicy *NetworkPolicy `json:"networkPolicy,omitempty"` - // ForceSendFields is a list of field names (e.g. "NetworkPolicy") to + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://developers.google.com/console/help/new/#projec + // tnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2278,10 +2770,10 @@ type SetNetworkPolicyRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NetworkPolicy") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -2299,6 +2791,36 @@ type SetNodePoolAutoscalingRequest struct { // Autoscaling: Autoscaling configuration for the node pool. 
Autoscaling *NodePoolAutoscaling `json:"autoscaling,omitempty"` + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster, node pool) of the node + // pool to set + // autoscaler settings. Specified in the + // format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Name string `json:"name,omitempty"` + + // NodePoolId: Deprecated. The name of the node pool to upgrade. + // This field has been deprecated and replaced by the name field. + NodePoolId string `json:"nodePoolId,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + // ForceSendFields is a list of field names (e.g. "Autoscaling") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -2326,10 +2848,40 @@ func (s *SetNodePoolAutoscalingRequest) MarshalJSON() ([]byte, error) { // node management properties of a node // pool. type SetNodePoolManagementRequest struct { + // ClusterId: Deprecated. The name of the cluster to update. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // Management: NodeManagement configuration for the node pool. Management *NodeManagement `json:"management,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Management") to + // Name: The name (project, location, cluster, node pool id) of the node + // pool to set + // management properties. Specified in the + // format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Name string `json:"name,omitempty"` + + // NodePoolId: Deprecated. The name of the node pool to update. + // This field has been deprecated and replaced by the name field. + NodePoolId string `json:"nodePoolId,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2337,7 +2889,7 @@ type SetNodePoolManagementRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Management") to include in + // NullFields is a list of field names (e.g. "ClusterId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2356,10 +2908,40 @@ func (s *SetNodePoolManagementRequest) MarshalJSON() ([]byte, error) { // node // pool. type SetNodePoolSizeRequest struct { + // ClusterId: Deprecated. The name of the cluster to update. 
+ // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster, node pool id) of the node + // pool to set + // size. + // Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Name string `json:"name,omitempty"` + // NodeCount: The desired node count for the pool. NodeCount int64 `json:"nodeCount,omitempty"` - // ForceSendFields is a list of field names (e.g. "NodeCount") to + // NodePoolId: Deprecated. The name of the node pool to update. + // This field has been deprecated and replaced by the name field. + NodePoolId string `json:"nodePoolId,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2367,7 +2949,7 @@ type SetNodePoolSizeRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NodeCount") to include in + // NullFields is a list of field names (e.g. "ClusterId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2386,15 +2968,31 @@ func (s *SetNodePoolSizeRequest) MarshalJSON() ([]byte, error) { // the cluster and then performs // a node upgrade on each node pool to point to the new IP. type StartIPRotationRequest struct { -} + // ClusterId: Deprecated. The name of the cluster. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` -// UpdateClusterRequest: UpdateClusterRequest updates the settings of a -// cluster. -type UpdateClusterRequest struct { - // Update: A description of the update. - Update *ClusterUpdate `json:"update,omitempty"` + // Name: The name (project, location, cluster id) of the cluster to + // start IP rotation. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Update") to + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://developers.google.com/console/help/new/#projec + // tnumber). + // This field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2402,8 +3000,61 @@ type UpdateClusterRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Update") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StartIPRotationRequest) MarshalJSON() ([]byte, error) { + type NoMethod StartIPRotationRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// UpdateClusterRequest: UpdateClusterRequest updates the settings of a +// cluster. +type UpdateClusterRequest struct { + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // Name: The name (project, location, cluster) of the cluster to + // update. + // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Update: A description of the update. + Update *ClusterUpdate `json:"update,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -2420,12 +3071,45 @@ func (s *UpdateClusterRequest) MarshalJSON() ([]byte, error) { // UpdateMasterRequest: UpdateMasterRequest updates the master of the // cluster. type UpdateMasterRequest struct { - // MasterVersion: The Kubernetes version to change the master to. Use - // "-" to have the server - // automatically select the default version. + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + + // MasterVersion: The Kubernetes version to change the master to. + // + // Users may specify either explicit versions offered by Kubernetes + // Engine or + // version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the default Kubernetes version MasterVersion string `json:"masterVersion,omitempty"` - // ForceSendFields is a list of field names (e.g. "MasterVersion") to + // Name: The name (project, location, cluster) of the cluster to + // update. 
+ // Specified in the format 'projects/*/locations/*/clusters/*'. + Name string `json:"name,omitempty"` + + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. + Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2433,10 +3117,10 @@ type UpdateMasterRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "MasterVersion") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ClusterId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -2451,16 +3135,55 @@ func (s *UpdateMasterRequest) MarshalJSON() ([]byte, error) { // UpdateNodePoolRequest: UpdateNodePoolRequests update a node pool's // image and/or version. 
type UpdateNodePoolRequest struct { + // ClusterId: Deprecated. The name of the cluster to upgrade. + // This field has been deprecated and replaced by the name field. + ClusterId string `json:"clusterId,omitempty"` + // ImageType: The desired image type for the node pool. ImageType string `json:"imageType,omitempty"` + // Name: The name (project, location, cluster, node pool) of the node + // pool to update. + // Specified in the format + // 'projects/*/locations/*/clusters/*/nodePools/*'. + Name string `json:"name,omitempty"` + + // NodePoolId: Deprecated. The name of the node pool to upgrade. + // This field has been deprecated and replaced by the name field. + NodePoolId string `json:"nodePoolId,omitempty"` + // NodeVersion: The Kubernetes version to change the nodes to (typically // an - // upgrade). Use `-` to upgrade to the latest version supported by - // the server. + // upgrade). + // + // Users may specify either explicit versions offered by Kubernetes + // Engine or + // version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the Kubernetes master version NodeVersion string `json:"nodeVersion,omitempty"` - // ForceSendFields is a list of field names (e.g. "ImageType") to + // ProjectId: Deprecated. The Google Developers Console [project ID or + // project + // number](https://support.google.com/cloud/answer/6158840). + // This + // field has been deprecated and replaced by the name field. + ProjectId string `json:"projectId,omitempty"` + + // Zone: Deprecated. The name of the Google Compute + // Engine + // [zone](/compute/docs/zones#available) in which the + // cluster + // resides. + // This field has been deprecated and replaced by the name field. 
+ Zone string `json:"zone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClusterId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -2468,7 +3191,7 @@ type UpdateNodePoolRequest struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ImageType") to include in + // NullFields is a list of field names (e.g. "ClusterId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -2483,6 +3206,4491 @@ func (s *UpdateNodePoolRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// method id "container.projects.locations.getServerConfig": + +type ProjectsLocationsGetServerConfigCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetServerConfig: Returns configuration info about the Kubernetes +// Engine service. +func (r *ProjectsLocationsService) GetServerConfig(name string) *ProjectsLocationsGetServerConfigCall { + c := &ProjectsLocationsGetServerConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://support.google.com/cloud/answer/6158840). +// This +// field has been deprecated and replaced by the name field. 
+func (c *ProjectsLocationsGetServerConfigCall) ProjectId(projectId string) *ProjectsLocationsGetServerConfigCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) to return operations for. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsGetServerConfigCall) Zone(zone string) *ProjectsLocationsGetServerConfigCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsGetServerConfigCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetServerConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsGetServerConfigCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetServerConfigCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsGetServerConfigCall) Context(ctx context.Context) *ProjectsLocationsGetServerConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsGetServerConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsGetServerConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/serverConfig") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.getServerConfig" call. +// Exactly one of *ServerConfig or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ServerConfig.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsGetServerConfigCall) Do(opts ...googleapi.CallOption) (*ServerConfig, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServerConfig{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns configuration info about the Kubernetes Engine service.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/serverConfig", + // "httpMethod": "GET", + // "id": "container.projects.locations.getServerConfig", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) to return operations for.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}/serverConfig", + // "response": { + // "$ref": "ServerConfig" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.completeIpRotation": + +type ProjectsLocationsClustersCompleteIpRotationCall struct { + s *Service + name string + completeiprotationrequest *CompleteIPRotationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// CompleteIpRotation: Completes master IP rotation. +func (r *ProjectsLocationsClustersService) CompleteIpRotation(name string, completeiprotationrequest *CompleteIPRotationRequest) *ProjectsLocationsClustersCompleteIpRotationCall { + c := &ProjectsLocationsClustersCompleteIpRotationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.completeiprotationrequest = completeiprotationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersCompleteIpRotationCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersCompleteIpRotationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersCompleteIpRotationCall) Context(ctx context.Context) *ProjectsLocationsClustersCompleteIpRotationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersCompleteIpRotationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersCompleteIpRotationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.completeiprotationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:completeIpRotation") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.completeIpRotation" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersCompleteIpRotationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Completes master IP rotation.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:completeIpRotation", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.completeIpRotation", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster id) of the cluster to complete IP rotation.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:completeIpRotation", + // "request": { + // "$ref": "CompleteIPRotationRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.create": + +type ProjectsLocationsClustersCreateCall struct { + s *Service + parent string + createclusterrequest *CreateClusterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a cluster, consisting of the specified number and +// type of Google +// Compute Engine instances. 
+// +// By default, the cluster is created in the project's +// [default +// network](/compute/docs/networks-and-firewalls#networks). +// +// One firewall is added for the cluster. After cluster creation, +// the cluster creates routes for each node to allow the containers +// on that node to communicate with all other instances in +// the +// cluster. +// +// Finally, an entry is added to the project's global metadata +// indicating +// which CIDR range is being used by the cluster. +func (r *ProjectsLocationsClustersService) Create(parent string, createclusterrequest *CreateClusterRequest) *ProjectsLocationsClustersCreateCall { + c := &ProjectsLocationsClustersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.createclusterrequest = createclusterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersCreateCall) Context(ctx context.Context) *ProjectsLocationsClustersCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/clusters") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a cluster, consisting of the specified number and type of Google\nCompute Engine instances.\n\nBy default, the cluster is created in the project's\n[default network](/compute/docs/networks-and-firewalls#networks).\n\nOne firewall is added for the cluster. After cluster creation,\nthe cluster creates routes for each node to allow the containers\non that node to communicate with all other instances in the\ncluster.\n\nFinally, an entry is added to the project's global metadata indicating\nwhich CIDR range is being used by the cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent (project and location) where the cluster will be created.\nSpecified in the format 'projects/*/locations/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/clusters", + // "request": { + // "$ref": "CreateClusterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id 
"container.projects.locations.clusters.delete": + +type ProjectsLocationsClustersDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the cluster, including the Kubernetes endpoint and +// all worker +// nodes. +// +// Firewalls and routes that were configured during cluster creation +// are also deleted. +// +// Other Google Compute Engine resources that might be in use by the +// cluster +// (e.g. load balancer resources) will not be deleted if they weren't +// present +// at the initial create time. +func (r *ProjectsLocationsClustersService) Delete(name string) *ProjectsLocationsClustersDeleteCall { + c := &ProjectsLocationsClustersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// ClusterId sets the optional parameter "clusterId": Deprecated. The +// name of the cluster to delete. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersDeleteCall) ClusterId(clusterId string) *ProjectsLocationsClustersDeleteCall { + c.urlParams_.Set("clusterId", clusterId) + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://support.google.com/cloud/answer/6158840). +// This +// field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersDeleteCall) ProjectId(projectId string) *ProjectsLocationsClustersDeleteCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) in which the +// cluster +// resides. +// This field has been deprecated and replaced by the name field. 
+func (c *ProjectsLocationsClustersDeleteCall) Zone(zone string) *ProjectsLocationsClustersDeleteCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersDeleteCall) Context(ctx context.Context) *ProjectsLocationsClustersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the cluster, including the Kubernetes endpoint and all worker\nnodes.\n\nFirewalls and routes that were configured during cluster creation\nare also deleted.\n\nOther Google Compute Engine resources that might be in use by the cluster\n(e.g. load balancer resources) will not be deleted if they weren't present\nat the initial create time.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", + // "httpMethod": "DELETE", + // "id": "container.projects.locations.clusters.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "clusterId": { + // "description": "Deprecated. 
The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.get": + +type ProjectsLocationsClustersGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the details of a specific cluster. +func (r *ProjectsLocationsClustersService) Get(name string) *ProjectsLocationsClustersGetCall { + c := &ProjectsLocationsClustersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// ClusterId sets the optional parameter "clusterId": Deprecated. The +// name of the cluster to retrieve. +// This field has been deprecated and replaced by the name field. 
+func (c *ProjectsLocationsClustersGetCall) ClusterId(clusterId string) *ProjectsLocationsClustersGetCall {
+	c.urlParams_.Set("clusterId", clusterId)
+	return c
+}
+
+// ProjectId sets the optional parameter "projectId": Deprecated. The
+// Google Developers Console [project ID or
+// project
+// number](https://support.google.com/cloud/answer/6158840).
+// This
+// field has been deprecated and replaced by the name field.
+func (c *ProjectsLocationsClustersGetCall) ProjectId(projectId string) *ProjectsLocationsClustersGetCall {
+	c.urlParams_.Set("projectId", projectId)
+	return c
+}
+
+// Zone sets the optional parameter "zone": Deprecated. The name of the
+// Google Compute Engine
+// [zone](/compute/docs/zones#available) in which the
+// cluster
+// resides.
+// This field has been deprecated and replaced by the name field.
+func (c *ProjectsLocationsClustersGetCall) Zone(zone string) *ProjectsLocationsClustersGetCall {
+	c.urlParams_.Set("zone", zone)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsLocationsClustersGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLocationsClustersGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLocationsClustersGetCall) Context(ctx context.Context) *ProjectsLocationsClustersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.get" call. +// Exactly one of *Cluster or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Cluster.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluster, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Cluster{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the details of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "clusterId": { + // "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Cluster" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.list": + +type ProjectsLocationsClustersListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all clusters owned by a project in either the specified +// zone or all +// zones. +func (r *ProjectsLocationsClustersService) List(parent string) *ProjectsLocationsClustersListCall { + c := &ProjectsLocationsClustersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://support.google.com/cloud/answer/6158840). +// This +// field has been deprecated and replaced by the parent field. +func (c *ProjectsLocationsClustersListCall) ProjectId(projectId string) *ProjectsLocationsClustersListCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) in which the cluster +// resides, or "-" for all zones. +// This field has been deprecated and replaced by the parent field. +func (c *ProjectsLocationsClustersListCall) Zone(zone string) *ProjectsLocationsClustersListCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsClustersListCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsLocationsClustersListCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsLocationsClustersListCall) Context(ctx context.Context) *ProjectsLocationsClustersListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsLocationsClustersListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsLocationsClustersListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/clusters")
+	urls += "?" + c.urlParams_.Encode()
+	req, _ := http.NewRequest("GET", urls, body)
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"parent": c.parent,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "container.projects.locations.clusters.list" call.
+// Exactly one of *ListClustersResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListClustersResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersListCall) Do(opts ...googleapi.CallOption) (*ListClustersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListClustersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all clusters owned by a project in either the specified zone or all\nzones.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/clusters", + // "response": { + // "$ref": "ListClustersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setAddons": + +type ProjectsLocationsClustersSetAddonsCall struct { + s *Service + name string + setaddonsconfigrequest *SetAddonsConfigRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetAddons: Sets the addons of a specific cluster. +func (r *ProjectsLocationsClustersService) SetAddons(name string, setaddonsconfigrequest *SetAddonsConfigRequest) *ProjectsLocationsClustersSetAddonsCall { + c := &ProjectsLocationsClustersSetAddonsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setaddonsconfigrequest = setaddonsconfigrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetAddonsCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetAddonsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsLocationsClustersSetAddonsCall) Context(ctx context.Context) *ProjectsLocationsClustersSetAddonsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersSetAddonsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetAddonsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setaddonsconfigrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setAddons") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setAddons" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetAddonsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the addons of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setAddons", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setAddons", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to set addons.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setAddons", + // "request": { + // "$ref": "SetAddonsConfigRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setLegacyAbac": + +type ProjectsLocationsClustersSetLegacyAbacCall struct { + s *Service + name string + setlegacyabacrequest *SetLegacyAbacRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLegacyAbac: Enables or disables the ABAC authorization mechanism +// on a cluster. 
+func (r *ProjectsLocationsClustersService) SetLegacyAbac(name string, setlegacyabacrequest *SetLegacyAbacRequest) *ProjectsLocationsClustersSetLegacyAbacCall { + c := &ProjectsLocationsClustersSetLegacyAbacCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setlegacyabacrequest = setlegacyabacrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetLegacyAbacCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetLegacyAbacCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetLegacyAbacCall) Context(ctx context.Context) *ProjectsLocationsClustersSetLegacyAbacCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersSetLegacyAbacCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetLegacyAbacCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setlegacyabacrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setLegacyAbac") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setLegacyAbac" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetLegacyAbacCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables or disables the ABAC authorization mechanism on a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLegacyAbac", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setLegacyAbac", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster id) of the cluster to set legacy abac.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + 
// "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setLegacyAbac", + // "request": { + // "$ref": "SetLegacyAbacRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setLocations": + +type ProjectsLocationsClustersSetLocationsCall struct { + s *Service + name string + setlocationsrequest *SetLocationsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLocations: Sets the locations of a specific cluster. +func (r *ProjectsLocationsClustersService) SetLocations(name string, setlocationsrequest *SetLocationsRequest) *ProjectsLocationsClustersSetLocationsCall { + c := &ProjectsLocationsClustersSetLocationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setlocationsrequest = setlocationsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetLocationsCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetLocationsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetLocationsCall) Context(ctx context.Context) *ProjectsLocationsClustersSetLocationsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersSetLocationsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetLocationsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setlocationsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setLocations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setLocations" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetLocationsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the locations of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLocations", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setLocations", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to set locations.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setLocations", + // "request": { + // "$ref": "SetLocationsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setLogging": + +type ProjectsLocationsClustersSetLoggingCall struct { + s *Service + name string + setloggingservicerequest *SetLoggingServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetLogging: Sets the logging service of a specific cluster. 
+func (r *ProjectsLocationsClustersService) SetLogging(name string, setloggingservicerequest *SetLoggingServiceRequest) *ProjectsLocationsClustersSetLoggingCall { + c := &ProjectsLocationsClustersSetLoggingCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setloggingservicerequest = setloggingservicerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetLoggingCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetLoggingCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetLoggingCall) Context(ctx context.Context) *ProjectsLocationsClustersSetLoggingCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersSetLoggingCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetLoggingCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setloggingservicerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setLogging") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setLogging" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetLoggingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the logging service of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setLogging", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setLogging", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to set logging.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // 
"pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setLogging", + // "request": { + // "$ref": "SetLoggingServiceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setMaintenancePolicy": + +type ProjectsLocationsClustersSetMaintenancePolicyCall struct { + s *Service + name string + setmaintenancepolicyrequest *SetMaintenancePolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMaintenancePolicy: Sets the maintenance policy for a cluster. +func (r *ProjectsLocationsClustersService) SetMaintenancePolicy(name string, setmaintenancepolicyrequest *SetMaintenancePolicyRequest) *ProjectsLocationsClustersSetMaintenancePolicyCall { + c := &ProjectsLocationsClustersSetMaintenancePolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setmaintenancepolicyrequest = setmaintenancepolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetMaintenancePolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Context(ctx context.Context) *ProjectsLocationsClustersSetMaintenancePolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setmaintenancepolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setMaintenancePolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setMaintenancePolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetMaintenancePolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the maintenance policy for a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMaintenancePolicy", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setMaintenancePolicy", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster id) of the cluster to set maintenance\npolicy.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setMaintenancePolicy", + // "request": { + // "$ref": "SetMaintenancePolicyRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setMasterAuth": + +type ProjectsLocationsClustersSetMasterAuthCall struct { + s *Service + name string + setmasterauthrequest *SetMasterAuthRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMasterAuth: Used to set master auth materials. Currently supports +// :- +// Changing the admin password of a specific cluster. 
+// This can be either via password generation or explicitly set the +// password. +func (r *ProjectsLocationsClustersService) SetMasterAuth(name string, setmasterauthrequest *SetMasterAuthRequest) *ProjectsLocationsClustersSetMasterAuthCall { + c := &ProjectsLocationsClustersSetMasterAuthCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setmasterauthrequest = setmasterauthrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetMasterAuthCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetMasterAuthCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetMasterAuthCall) Context(ctx context.Context) *ProjectsLocationsClustersSetMasterAuthCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersSetMasterAuthCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetMasterAuthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setmasterauthrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setMasterAuth") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setMasterAuth" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Used to set master auth materials. 
Currently supports :-\nChanging the admin password of a specific cluster.\nThis can be either via password generation or explicitly set the password.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMasterAuth", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setMasterAuth", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to set auth.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setMasterAuth", + // "request": { + // "$ref": "SetMasterAuthRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setMonitoring": + +type ProjectsLocationsClustersSetMonitoringCall struct { + s *Service + name string + setmonitoringservicerequest *SetMonitoringServiceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetMonitoring: Sets the monitoring service of a specific cluster. +func (r *ProjectsLocationsClustersService) SetMonitoring(name string, setmonitoringservicerequest *SetMonitoringServiceRequest) *ProjectsLocationsClustersSetMonitoringCall { + c := &ProjectsLocationsClustersSetMonitoringCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setmonitoringservicerequest = setmonitoringservicerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsClustersSetMonitoringCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetMonitoringCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetMonitoringCall) Context(ctx context.Context) *ProjectsLocationsClustersSetMonitoringCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersSetMonitoringCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetMonitoringCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setmonitoringservicerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setMonitoring") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setMonitoring" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetMonitoringCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the monitoring service of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setMonitoring", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setMonitoring", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to set monitoring.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setMonitoring", + // "request": { + // "$ref": "SetMonitoringServiceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setNetworkPolicy": + +type ProjectsLocationsClustersSetNetworkPolicyCall struct { + s *Service + name string + 
setnetworkpolicyrequest *SetNetworkPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetNetworkPolicy: Enables/Disables Network Policy for a cluster. +func (r *ProjectsLocationsClustersService) SetNetworkPolicy(name string, setnetworkpolicyrequest *SetNetworkPolicyRequest) *ProjectsLocationsClustersSetNetworkPolicyCall { + c := &ProjectsLocationsClustersSetNetworkPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setnetworkpolicyrequest = setnetworkpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetNetworkPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Context(ctx context.Context) *ProjectsLocationsClustersSetNetworkPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetNetworkPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setnetworkpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setNetworkPolicy") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setNetworkPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetNetworkPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enables/Disables Network Policy for a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setNetworkPolicy", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setNetworkPolicy", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster id) of the cluster to set networking policy.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setNetworkPolicy", + // "request": { + // "$ref": "SetNetworkPolicyRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.setResourceLabels": + +type ProjectsLocationsClustersSetResourceLabelsCall struct { + s *Service + name string + setlabelsrequest *SetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetResourceLabels: Sets labels on a cluster. 
+func (r *ProjectsLocationsClustersService) SetResourceLabels(name string, setlabelsrequest *SetLabelsRequest) *ProjectsLocationsClustersSetResourceLabelsCall { + c := &ProjectsLocationsClustersSetResourceLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setlabelsrequest = setlabelsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersSetResourceLabelsCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersSetResourceLabelsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersSetResourceLabelsCall) Context(ctx context.Context) *ProjectsLocationsClustersSetResourceLabelsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersSetResourceLabelsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersSetResourceLabelsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setResourceLabels") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.setResourceLabels" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersSetResourceLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets labels on a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:setResourceLabels", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.setResourceLabels", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster id) of the cluster to set labels.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // 
"pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setResourceLabels", + // "request": { + // "$ref": "SetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.startIpRotation": + +type ProjectsLocationsClustersStartIpRotationCall struct { + s *Service + name string + startiprotationrequest *StartIPRotationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// StartIpRotation: Start master IP rotation. +func (r *ProjectsLocationsClustersService) StartIpRotation(name string, startiprotationrequest *StartIPRotationRequest) *ProjectsLocationsClustersStartIpRotationCall { + c := &ProjectsLocationsClustersStartIpRotationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.startiprotationrequest = startiprotationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersStartIpRotationCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersStartIpRotationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersStartIpRotationCall) Context(ctx context.Context) *ProjectsLocationsClustersStartIpRotationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersStartIpRotationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersStartIpRotationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.startiprotationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:startIpRotation") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.startIpRotation" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersStartIpRotationCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Start master IP rotation.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:startIpRotation", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.startIpRotation", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster id) of the cluster to start IP rotation.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:startIpRotation", + // "request": { + // "$ref": "StartIPRotationRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.update": + +type ProjectsLocationsClustersUpdateCall struct { + s *Service + name string + updateclusterrequest *UpdateClusterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the settings of a specific cluster. 
+func (r *ProjectsLocationsClustersService) Update(name string, updateclusterrequest *UpdateClusterRequest) *ProjectsLocationsClustersUpdateCall { + c := &ProjectsLocationsClustersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.updateclusterrequest = updateclusterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersUpdateCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersUpdateCall) Context(ctx context.Context) *ProjectsLocationsClustersUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updateclusterrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the settings of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}", + // "httpMethod": "PUT", + // "id": "container.projects.locations.clusters.update", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": 
"^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "UpdateClusterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.updateMaster": + +type ProjectsLocationsClustersUpdateMasterCall struct { + s *Service + name string + updatemasterrequest *UpdateMasterRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// UpdateMaster: Updates the master of a specific cluster. +func (r *ProjectsLocationsClustersService) UpdateMaster(name string, updatemasterrequest *UpdateMasterRequest) *ProjectsLocationsClustersUpdateMasterCall { + c := &ProjectsLocationsClustersUpdateMasterCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.updatemasterrequest = updatemasterrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersUpdateMasterCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersUpdateMasterCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersUpdateMasterCall) Context(ctx context.Context) *ProjectsLocationsClustersUpdateMasterCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersUpdateMasterCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersUpdateMasterCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatemasterrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:updateMaster") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.updateMaster" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersUpdateMasterCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the master of a specific cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}:updateMaster", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.updateMaster", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster) of the cluster to update.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:updateMaster", + // "request": { + // "$ref": "UpdateMasterRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.create": + +type ProjectsLocationsClustersNodePoolsCreateCall struct { + s *Service + parent string + createnodepoolrequest *CreateNodePoolRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a node pool for a cluster. 
+func (r *ProjectsLocationsClustersNodePoolsService) Create(parent string, createnodepoolrequest *CreateNodePoolRequest) *ProjectsLocationsClustersNodePoolsCreateCall { + c := &ProjectsLocationsClustersNodePoolsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.createnodepoolrequest = createnodepoolrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsCreateCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.createnodepoolrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/nodePools") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.create" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsCreateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a node pool for a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.nodePools.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent (project, location, cluster id) where the node pool will be created.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // 
"location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/nodePools", + // "request": { + // "$ref": "CreateNodePoolRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.delete": + +type ProjectsLocationsClustersNodePoolsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a node pool from a cluster. +func (r *ProjectsLocationsClustersNodePoolsService) Delete(name string) *ProjectsLocationsClustersNodePoolsDeleteCall { + c := &ProjectsLocationsClustersNodePoolsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// ClusterId sets the optional parameter "clusterId": Deprecated. The +// name of the cluster. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsDeleteCall { + c.urlParams_.Set("clusterId", clusterId) + return c +} + +// NodePoolId sets the optional parameter "nodePoolId": Deprecated. The +// name of the node pool to delete. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) NodePoolId(nodePoolId string) *ProjectsLocationsClustersNodePoolsDeleteCall { + c.urlParams_.Set("nodePoolId", nodePoolId) + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://developers.google.com/console/help/new/#projec +// tnumber). +// This field has been deprecated and replaced by the name field. 
+func (c *ProjectsLocationsClustersNodePoolsDeleteCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsDeleteCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) in which the +// cluster +// resides. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsDeleteCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a node pool from a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", + // "httpMethod": "DELETE", + // "id": "container.projects.locations.clusters.nodePools.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "clusterId": { + // "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node pool to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "nodePoolId": { + // "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.get": + +type ProjectsLocationsClustersNodePoolsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the node pool requested. 
+func (r *ProjectsLocationsClustersNodePoolsService) Get(name string) *ProjectsLocationsClustersNodePoolsGetCall { + c := &ProjectsLocationsClustersNodePoolsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// ClusterId sets the optional parameter "clusterId": Deprecated. The +// name of the cluster. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsGetCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsGetCall { + c.urlParams_.Set("clusterId", clusterId) + return c +} + +// NodePoolId sets the optional parameter "nodePoolId": Deprecated. The +// name of the node pool. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsGetCall) NodePoolId(nodePoolId string) *ProjectsLocationsClustersNodePoolsGetCall { + c.urlParams_.Set("nodePoolId", nodePoolId) + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://developers.google.com/console/help/new/#projec +// tnumber). +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsGetCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsGetCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) in which the +// cluster +// resides. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsClustersNodePoolsGetCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsGetCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsClustersNodePoolsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsClustersNodePoolsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersNodePoolsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsGetCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.get" call. +// Exactly one of *NodePool or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *NodePool.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) (*NodePool, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &NodePool{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the node pool requested.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.nodePools.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "clusterId": { + // "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node pool to get.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "nodePoolId": { + // "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "NodePool" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.list": + +type ProjectsLocationsClustersNodePoolsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the node pools for a cluster. 
+func (r *ProjectsLocationsClustersNodePoolsService) List(parent string) *ProjectsLocationsClustersNodePoolsListCall { + c := &ProjectsLocationsClustersNodePoolsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// ClusterId sets the optional parameter "clusterId": Deprecated. The +// name of the cluster. +// This field has been deprecated and replaced by the parent field. +func (c *ProjectsLocationsClustersNodePoolsListCall) ClusterId(clusterId string) *ProjectsLocationsClustersNodePoolsListCall { + c.urlParams_.Set("clusterId", clusterId) + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://developers.google.com/console/help/new/#projec +// tnumber). +// This field has been deprecated and replaced by the parent field. +func (c *ProjectsLocationsClustersNodePoolsListCall) ProjectId(projectId string) *ProjectsLocationsClustersNodePoolsListCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) in which the +// cluster +// resides. +// This field has been deprecated and replaced by the parent field. +func (c *ProjectsLocationsClustersNodePoolsListCall) Zone(zone string) *ProjectsLocationsClustersNodePoolsListCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsClustersNodePoolsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsClustersNodePoolsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsListCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/nodePools") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.list" call. +// Exactly one of *ListNodePoolsResponse or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *ListNodePoolsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsListCall) Do(opts ...googleapi.CallOption) (*ListNodePoolsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListNodePoolsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the node pools for a cluster.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools", + // "httpMethod": "GET", + // "id": "container.projects.locations.clusters.nodePools.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "clusterId": { + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent (project, location, cluster id) where the node pools will be listed.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/nodePools", + // "response": { + // "$ref": "ListNodePoolsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.rollback": + +type ProjectsLocationsClustersNodePoolsRollbackCall struct { + s *Service + name string + rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rollback: Roll back the previously Aborted or Failed NodePool +// upgrade. +// This will be an no-op if the last upgrade successfully completed. +func (r *ProjectsLocationsClustersNodePoolsService) Rollback(name string, rollbacknodepoolupgraderequest *RollbackNodePoolUpgradeRequest) *ProjectsLocationsClustersNodePoolsRollbackCall { + c := &ProjectsLocationsClustersNodePoolsRollbackCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.rollbacknodepoolupgraderequest = rollbacknodepoolupgraderequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsRollbackCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsRollbackCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsRollbackCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.rollbacknodepoolupgraderequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:rollback") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.rollback" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Roll back the previously Aborted or Failed NodePool upgrade.\nThis will be an no-op if the last upgrade successfully completed.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:rollback", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.nodePools.rollback", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node poll to\nrollback upgrade.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:rollback", + // "request": { + // "$ref": "RollbackNodePoolUpgradeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id 
"container.projects.locations.clusters.nodePools.setAutoscaling": + +type ProjectsLocationsClustersNodePoolsSetAutoscalingCall struct { + s *Service + name string + setnodepoolautoscalingrequest *SetNodePoolAutoscalingRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetAutoscaling: Sets the autoscaling settings of a specific node +// pool. +func (r *ProjectsLocationsClustersNodePoolsService) SetAutoscaling(name string, setnodepoolautoscalingrequest *SetNodePoolAutoscalingRequest) *ProjectsLocationsClustersNodePoolsSetAutoscalingCall { + c := &ProjectsLocationsClustersNodePoolsSetAutoscalingCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setnodepoolautoscalingrequest = setnodepoolautoscalingrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsSetAutoscalingCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsSetAutoscalingCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setnodepoolautoscalingrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setAutoscaling") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.setAutoscaling" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsSetAutoscalingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the autoscaling settings of a specific node pool.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setAutoscaling", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.nodePools.setAutoscaling", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster, node pool) of the node pool to set\nautoscaler settings. 
Specified in the format\n'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setAutoscaling", + // "request": { + // "$ref": "SetNodePoolAutoscalingRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.setManagement": + +type ProjectsLocationsClustersNodePoolsSetManagementCall struct { + s *Service + name string + setnodepoolmanagementrequest *SetNodePoolManagementRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetManagement: Sets the NodeManagement options for a node pool. +func (r *ProjectsLocationsClustersNodePoolsService) SetManagement(name string, setnodepoolmanagementrequest *SetNodePoolManagementRequest) *ProjectsLocationsClustersNodePoolsSetManagementCall { + c := &ProjectsLocationsClustersNodePoolsSetManagementCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setnodepoolmanagementrequest = setnodepoolmanagementrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsSetManagementCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsSetManagementCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setnodepoolmanagementrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setManagement") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.setManagement" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsSetManagementCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the NodeManagement options for a node pool.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setManagement", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.nodePools.setManagement", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node pool to set\nmanagement properties. Specified in the format\n'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setManagement", + // "request": { + // "$ref": "SetNodePoolManagementRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.setSize": + +type ProjectsLocationsClustersNodePoolsSetSizeCall struct { + s *Service + name string + setnodepoolsizerequest *SetNodePoolSizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSize: Sets the size of a specific node pool. 
+func (r *ProjectsLocationsClustersNodePoolsService) SetSize(name string, setnodepoolsizerequest *SetNodePoolSizeRequest) *ProjectsLocationsClustersNodePoolsSetSizeCall { + c := &ProjectsLocationsClustersNodePoolsSetSizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.setnodepoolsizerequest = setnodepoolsizerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsSetSizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsSetSizeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.setnodepoolsizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:setSize") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.setSize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsSetSizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the size of a specific node pool.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}:setSize", + // "httpMethod": "POST", + // "id": "container.projects.locations.clusters.nodePools.setSize", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node pool to set\nsize.\nSpecified in the format 
'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:setSize", + // "request": { + // "$ref": "SetNodePoolSizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.clusters.nodePools.update": + +type ProjectsLocationsClustersNodePoolsUpdateCall struct { + s *Service + name string + updatenodepoolrequest *UpdateNodePoolRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the version and/or image type of a specific node +// pool. +func (r *ProjectsLocationsClustersNodePoolsService) Update(name string, updatenodepoolrequest *UpdateNodePoolRequest) *ProjectsLocationsClustersNodePoolsUpdateCall { + c := &ProjectsLocationsClustersNodePoolsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.updatenodepoolrequest = updatenodepoolrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Fields(s ...googleapi.Field) *ProjectsLocationsClustersNodePoolsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Context(ctx context.Context) *ProjectsLocationsClustersNodePoolsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsClustersNodePoolsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.updatenodepoolrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.clusters.nodePools.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsClustersNodePoolsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the version and/or image type of a specific node pool.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/clusters/{clustersId}/nodePools/{nodePoolsId}", + // "httpMethod": "PUT", + // "id": "container.projects.locations.clusters.nodePools.update", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, cluster, node pool) of the node pool to update.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/clusters/[^/]+/nodePools/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "request": { + // "$ref": "UpdateNodePoolRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.operations.cancel": + +type ProjectsLocationsOperationsCancelCall struct { + s *Service + name string + canceloperationrequest *CancelOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Cancels the specified operation. 
+func (r *ProjectsLocationsOperationsService) Cancel(name string, canceloperationrequest *CancelOperationRequest) *ProjectsLocationsOperationsCancelCall { + c := &ProjectsLocationsOperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.canceloperationrequest = canceloperationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsOperationsCancelCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsOperationsCancelCall) Context(ctx context.Context) *ProjectsLocationsOperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsOperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.canceloperationrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}:cancel") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.operations.cancel" call. +// Exactly one of *Empty or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Empty.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsLocationsOperationsCancelCall) Do(opts ...googleapi.CallOption) (*Empty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Empty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Cancels the specified operation.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel", + // "httpMethod": "POST", + // "id": "container.projects.locations.operations.cancel", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, operation id) of the operation to cancel.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + // "location": "path", + // "pattern": 
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}:cancel", + // "request": { + // "$ref": "CancelOperationRequest" + // }, + // "response": { + // "$ref": "Empty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.operations.get": + +type ProjectsLocationsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the specified operation. +func (r *ProjectsLocationsOperationsService) Get(name string) *ProjectsLocationsOperationsGetCall { + c := &ProjectsLocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// OperationId sets the optional parameter "operationId": Deprecated. +// The server-assigned `name` of the operation. +// This field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsOperationsGetCall) OperationId(operationId string) *ProjectsLocationsOperationsGetCall { + c.urlParams_.Set("operationId", operationId) + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://support.google.com/cloud/answer/6158840). +// This +// field has been deprecated and replaced by the name field. +func (c *ProjectsLocationsOperationsGetCall) ProjectId(projectId string) *ProjectsLocationsOperationsGetCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine +// [zone](/compute/docs/zones#available) in which the +// cluster +// resides. +// This field has been deprecated and replaced by the name field. 
+func (c *ProjectsLocationsOperationsGetCall) Zone(zone string) *ProjectsLocationsOperationsGetCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsOperationsGetCall) Context(ctx context.Context) *ProjectsLocationsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the specified operation.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "container.projects.locations.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + // "location": "path", + // "pattern": 
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "operationId": { + // "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "container.projects.locations.operations.list": + +type ProjectsLocationsOperationsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists all operations in a project in a specific zone or all +// zones. +func (r *ProjectsLocationsOperationsService) List(parent string) *ProjectsLocationsOperationsListCall { + c := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// ProjectId sets the optional parameter "projectId": Deprecated. The +// Google Developers Console [project ID or +// project +// number](https://support.google.com/cloud/answer/6158840). +// This +// field has been deprecated and replaced by the parent field. 
+func (c *ProjectsLocationsOperationsListCall) ProjectId(projectId string) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("projectId", projectId) + return c +} + +// Zone sets the optional parameter "zone": Deprecated. The name of the +// Google Compute Engine [zone](/compute/docs/zones#available) +// to return operations for, or `-` for all zones. +// This field has been deprecated and replaced by the parent field. +func (c *ProjectsLocationsOperationsListCall) Zone(zone string) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("zone", zone) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsOperationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsOperationsListCall) Context(ctx context.Context) *ProjectsLocationsOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsLocationsOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+parent}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "container.projects.locations.operations.list" call. +// Exactly one of *ListOperationsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ListOperationsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsOperationsListCall) Do(opts ...googleapi.CallOption) (*ListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists all operations in a project in a specific zone or all zones.", + // "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/operations", + // "httpMethod": "GET", + // "id": "container.projects.locations.operations.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Deprecated. 
The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.\nThis field has been deprecated and replaced by the parent field.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v1/{+parent}/operations", + // "response": { + // "$ref": "ListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + // method id "container.projects.zones.getServerconfig": type ProjectsZonesGetServerconfigCall struct { @@ -2504,6 +7712,14 @@ func (r *ProjectsZonesService) GetServerconfig(projectId string, zone string) *P return c } +// Name sets the optional parameter "name": The name (project and +// location) of the server config to get +// Specified in the format 'projects/*/locations/*'. +func (c *ProjectsZonesGetServerconfigCall) Name(name string) *ProjectsZonesGetServerconfigCall { + c.urlParams_.Set("name", name) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -2608,14 +7824,19 @@ func (c *ProjectsZonesGetServerconfigCall) Do(opts ...googleapi.CallOption) (*Se // "zone" // ], // "parameters": { + // "name": { + // "description": "The name (project and location) of the server config to get\nSpecified in the format 'projects/*/locations/*'.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) to return operations for.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -2754,19 +7975,19 @@ func (c *ProjectsZonesClustersAddonsCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -2908,19 +8129,19 @@ func (c *ProjectsZonesClustersCompleteIpRotationCall) Do(opts ...googleapi.CallO // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -3074,13 +8295,13 @@ func (c *ProjectsZonesClustersCreateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -3132,6 +8353,14 @@ func (r *ProjectsZonesClustersService) Delete(projectId string, zone string, clu return c } +// Name sets the optional parameter "name": The name (project, location, +// cluster) of the cluster to delete. +// Specified in the format 'projects/*/locations/*/clusters/*'. +func (c *ProjectsZonesClustersDeleteCall) Name(name string) *ProjectsZonesClustersDeleteCall { + c.urlParams_.Set("name", name) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -3226,19 +8455,24 @@ func (c *ProjectsZonesClustersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to delete.", + // "description": "Deprecated. 
The name of the cluster to delete.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, + // "name": { + // "description": "The name (project, location, cluster) of the cluster to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -3277,6 +8511,14 @@ func (r *ProjectsZonesClustersService) Get(projectId string, zone string, cluste return c } +// Name sets the optional parameter "name": The name (project, location, +// cluster) of the cluster to retrieve. +// Specified in the format 'projects/*/locations/*/clusters/*'. +func (c *ProjectsZonesClustersGetCall) Name(name string) *ProjectsZonesClustersGetCall { + c.urlParams_.Set("name", name) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -3384,19 +8626,24 @@ func (c *ProjectsZonesClustersGetCall) Do(opts ...googleapi.CallOption) (*Cluste // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to retrieve.", + // "description": "Deprecated. The name of the cluster to retrieve.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, + // "name": { + // "description": "The name (project, location, cluster) of the cluster to retrieve.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -3536,19 +8783,19 @@ func (c *ProjectsZonesClustersLegacyAbacCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to update.", + // "description": "Deprecated. 
The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -3590,6 +8837,15 @@ func (r *ProjectsZonesClustersService) List(projectId string, zone string) *Proj return c } +// Parent sets the optional parameter "parent": The parent (project and +// location) where the clusters will be listed. +// Specified in the format 'projects/*/locations/*'. +// Location "-" matches all zones and all regions. +func (c *ProjectsZonesClustersListCall) Parent(parent string) *ProjectsZonesClustersListCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
@@ -3694,14 +8950,19 @@ func (c *ProjectsZonesClustersListCall) Do(opts ...googleapi.CallOption) (*ListC // "zone" // ], // "parameters": { + // "parent": { + // "description": "The parent (project and location) where the clusters will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides, or \"-\" for all zones.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -3840,19 +9101,19 @@ func (c *ProjectsZonesClustersLocationsCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -3994,19 +9255,19 @@ func (c *ProjectsZonesClustersLoggingCall) Do(opts ...googleapi.CallOption) (*Op // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -4148,19 +9409,19 @@ func (c *ProjectsZonesClustersMasterCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -4302,19 +9563,19 @@ func (c *ProjectsZonesClustersMonitoringCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. 
The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -4456,19 +9717,19 @@ func (c *ProjectsZonesClustersResourceLabelsCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -4768,19 +10029,19 @@ func (c *ProjectsZonesClustersSetMasterAuthCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -4922,19 +10183,19 @@ func (c *ProjectsZonesClustersSetNetworkPolicyCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -5076,19 +10337,19 @@ func (c *ProjectsZonesClustersStartIpRotationCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -5230,19 +10491,19 @@ func (c *ProjectsZonesClustersUpdateCall) Do(opts ...googleapi.CallOption) (*Ope // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -5388,25 +10649,25 @@ func (c *ProjectsZonesClustersNodePoolsAutoscalingCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "The name of the node pool to upgrade.", + // "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -5548,19 +10809,19 @@ func (c *ProjectsZonesClustersNodePoolsCreateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -5603,6 +10864,15 @@ func (r *ProjectsZonesClustersNodePoolsService) Delete(projectId string, zone st return c } +// Name sets the optional parameter "name": The name (project, location, +// cluster, node pool id) of the node pool to delete. +// Specified in the format +// 'projects/*/locations/*/clusters/*/nodePools/*'. 
+func (c *ProjectsZonesClustersNodePoolsDeleteCall) Name(name string) *ProjectsZonesClustersNodePoolsDeleteCall { + c.urlParams_.Set("name", name) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -5699,25 +10969,30 @@ func (c *ProjectsZonesClustersNodePoolsDeleteCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node pool to delete.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "query", + // "type": "string" + // }, // "nodePoolId": { - // "description": "The name of the node pool to delete.", + // "description": "Deprecated. The name of the node pool to delete.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -5758,6 +11033,15 @@ func (r *ProjectsZonesClustersNodePoolsService) Get(projectId string, zone strin return c } +// Name sets the optional parameter "name": The name (project, location, +// cluster, node pool id) of the node pool to get. +// Specified in the format +// 'projects/*/locations/*/clusters/*/nodePools/*'. +func (c *ProjectsZonesClustersNodePoolsGetCall) Name(name string) *ProjectsZonesClustersNodePoolsGetCall { + c.urlParams_.Set("name", name) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -5867,25 +11151,30 @@ func (c *ProjectsZonesClustersNodePoolsGetCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. The name of the cluster.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, + // "name": { + // "description": "The name (project, location, cluster, node pool id) of the node pool to get.\nSpecified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.", + // "location": "query", + // "type": "string" + // }, // "nodePoolId": { - // "description": "The name of the node pool.", + // "description": "Deprecated. The name of the node pool.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -5924,6 +11213,14 @@ func (r *ProjectsZonesClustersNodePoolsService) List(projectId string, zone stri return c } +// Parent sets the optional parameter "parent": The parent (project, +// location, cluster id) where the node pools will be listed. +// Specified in the format 'projects/*/locations/*/clusters/*'. +func (c *ProjectsZonesClustersNodePoolsListCall) Parent(parent string) *ProjectsZonesClustersNodePoolsListCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -6031,19 +11328,24 @@ func (c *ProjectsZonesClustersNodePoolsListCall) Do(opts ...googleapi.CallOption // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster.", + // "description": "Deprecated. 
The name of the cluster.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, + // "parent": { + // "description": "The parent (project, location, cluster id) where the node pools will be listed.\nSpecified in the format 'projects/*/locations/*/clusters/*'.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://developers.google.com/console/help/new/#projectnumber).\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" @@ -6188,25 +11490,25 @@ func (c *ProjectsZonesClustersNodePoolsRollbackCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to rollback.", + // "description": "Deprecated. The name of the cluster to rollback.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "The name of the node pool to rollback.", + // "description": "Deprecated. 
The name of the node pool to rollback.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -6352,25 +11654,25 @@ func (c *ProjectsZonesClustersNodePoolsSetManagementCall) Do(opts ...googleapi.C // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to update.", + // "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "The name of the node pool to update.", + // "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. 
The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -6516,25 +11818,25 @@ func (c *ProjectsZonesClustersNodePoolsSetSizeCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to update.", + // "description": "Deprecated. The name of the cluster to update.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "The name of the node pool to update.", + // "description": "Deprecated. The name of the node pool to update.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -6681,25 +11983,25 @@ func (c *ProjectsZonesClustersNodePoolsUpdateCall) Do(opts ...googleapi.CallOpti // ], // "parameters": { // "clusterId": { - // "description": "The name of the cluster to upgrade.", + // "description": "Deprecated. The name of the cluster to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "nodePoolId": { - // "description": "The name of the node pool to upgrade.", + // "description": "Deprecated. The name of the node pool to upgrade.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -6841,19 +12143,19 @@ func (c *ProjectsZonesOperationsCancelCall) Do(opts ...googleapi.CallOption) (*E // ], // "parameters": { // "operationId": { - // "description": "The server-assigned `name` of the operation.", + // "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.", + // "description": "Deprecated. The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the operation resides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -6895,6 +12197,14 @@ func (r *ProjectsZonesOperationsService) Get(projectId string, zone string, oper return c } +// Name sets the optional parameter "name": The name (project, location, +// operation id) of the operation to get. +// Specified in the format 'projects/*/locations/*/operations/*'. 
+func (c *ProjectsZonesOperationsGetCall) Name(name string) *ProjectsZonesOperationsGetCall { + c.urlParams_.Set("name", name) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -7001,20 +12311,25 @@ func (c *ProjectsZonesOperationsGetCall) Do(opts ...googleapi.CallOption) (*Oper // "operationId" // ], // "parameters": { + // "name": { + // "description": "The name (project, location, operation id) of the operation to get.\nSpecified in the format 'projects/*/locations/*/operations/*'.", + // "location": "query", + // "type": "string" + // }, // "operationId": { - // "description": "The server-assigned `name` of the operation.", + // "description": "Deprecated. The server-assigned `name` of the operation.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.", + // "description": "Deprecated. 
The name of the Google Compute Engine\n[zone](/compute/docs/zones#available) in which the cluster\nresides.\nThis field has been deprecated and replaced by the name field.", // "location": "path", // "required": true, // "type": "string" @@ -7052,6 +12367,15 @@ func (r *ProjectsZonesOperationsService) List(projectId string, zone string) *Pr return c } +// Parent sets the optional parameter "parent": The parent (project and +// location) where the operations will be listed. +// Specified in the format 'projects/*/locations/*'. +// Location "-" matches all zones and all regions. +func (c *ProjectsZonesOperationsListCall) Parent(parent string) *ProjectsZonesOperationsListCall { + c.urlParams_.Set("parent", parent) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -7156,14 +12480,19 @@ func (c *ProjectsZonesOperationsListCall) Do(opts ...googleapi.CallOption) (*Lis // "zone" // ], // "parameters": { + // "parent": { + // "description": "The parent (project and location) where the operations will be listed.\nSpecified in the format 'projects/*/locations/*'.\nLocation \"-\" matches all zones and all regions.", + // "location": "query", + // "type": "string" + // }, // "projectId": { - // "description": "The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).", + // "description": "Deprecated. The Google Developers Console [project ID or project\nnumber](https://support.google.com/cloud/answer/6158840).\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" // }, // "zone": { - // "description": "The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.", + // "description": "Deprecated. 
The name of the Google Compute Engine [zone](/compute/docs/zones#available)\nto return operations for, or `-` for all zones.\nThis field has been deprecated and replaced by the parent field.", // "location": "path", // "required": true, // "type": "string" diff --git a/vendor/google.golang.org/api/container/v1beta1/container-api.json b/vendor/google.golang.org/api/container/v1beta1/container-api.json index a79046eeb..3c6ca89e4 100644 --- a/vendor/google.golang.org/api/container/v1beta1/container-api.json +++ b/vendor/google.golang.org/api/container/v1beta1/container-api.json @@ -2372,7 +2372,7 @@ } } }, - "revision": "20180223", + "revision": "20180308", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2524,7 +2524,7 @@ "type": "string" }, "initialClusterVersion": { - "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.", + "description": "The initial Kubernetes version for this cluster. Valid versions are those\nfound in validMasterVersions returned by getServerConfig. The version can\nbe upgraded over time; such upgrades are reflected in\ncurrentMasterVersion and currentNodeVersion.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"\",\"-\": picks the default Kubernetes version", "type": "string" }, "initialNodeCount": { @@ -2578,6 +2578,10 @@ "$ref": "MasterAuthorizedNetworksConfig", "description": "The configuration options for master authorized networks feature." 
}, + "masterIpv4CidrBlock": { + "description": "The IP prefix in CIDR notation to use for the hosted master network.\nThis prefix will be used for assigning private IP addresses to the\nmaster or set of masters, as well as the ILB VIP.", + "type": "string" + }, "monitoringService": { "description": "The monitoring service the cluster should use to write metrics.\nCurrently available options:\n\n* `monitoring.googleapis.com` - the Google Cloud Monitoring service.\n* `none` - no metrics will be exported from the cluster.\n* if left as an empty string, `monitoring.googleapis.com` will be used.", "type": "string" @@ -2587,7 +2591,7 @@ "type": "string" }, "network": { - "description": "The name of the Google Compute Engine\n[network](/compute/docs/networks-and-firewalls#networks) to which the\ncluster is connected. If left unspecified, the `default` network\nwill be used.", + "description": "The name of the Google Compute Engine\n[network](/compute/docs/networks-and-firewalls#networks) to which the\ncluster is connected. If left unspecified, the `default` network\nwill be used. On output this shows the network ID instead of\nthe name.", "type": "string" }, "networkPolicy": { @@ -2614,6 +2618,10 @@ "$ref": "PodSecurityPolicyConfig", "description": "Configuration for the PodSecurityPolicy feature." }, + "privateCluster": { + "description": "If this is a private cluster setup. Private clusters are clusters that, by\ndefault have no external IP addresses on the nodes and where nodes and the\nmaster communicate over private IP addresses.", + "type": "boolean" + }, "resourceLabels": { "additionalProperties": { "type": "string" @@ -2656,7 +2664,7 @@ "type": "string" }, "subnetwork": { - "description": "The name of the Google Compute Engine\n[subnetwork](/compute/docs/subnetworks) to which the\ncluster is connected.", + "description": "The name of the Google Compute Engine\n[subnetwork](/compute/docs/subnetworks) to which the\ncluster is connected. 
On output this shows the subnetwork ID instead of\nthe name.", "type": "string" }, "zone": { @@ -2690,7 +2698,7 @@ "description": "The desired configuration options for master authorized networks feature." }, "desiredMasterVersion": { - "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version. Use \"-\" to have the server automatically select\nthe latest version.", + "description": "The Kubernetes version to change the master to. The only valid value is the\nlatest supported version.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", "type": "string" }, "desiredMonitoringService": { @@ -2706,7 +2714,7 @@ "type": "string" }, "desiredNodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", + "description": "The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", "type": "string" }, "desiredPodSecurityPolicyConfig": { @@ -3892,7 +3900,7 @@ "type": "string" }, "masterVersion": { - "description": "The Kubernetes version to change the master to. 
The only valid value is the\nlatest supported version. Use \"-\" to have the server automatically select\nthe latest version.", + "description": "The Kubernetes version to change the master to.\n\nUsers may specify either explicit versions offered by\nKubernetes Engine or version aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the default Kubernetes version", "type": "string" }, "name": { @@ -3931,7 +3939,7 @@ "type": "string" }, "nodeVersion": { - "description": "The Kubernetes version to change the nodes to (typically an\nupgrade). Use `-` to upgrade to the latest version supported by\nthe server.", + "description": "The Kubernetes version to change the nodes to (typically an\nupgrade).\n\nUsers may specify either explicit versions offered by Kubernetes Engine or\nversion aliases, which have the following behavior:\n\n- \"latest\": picks the highest valid Kubernetes version\n- \"1.X\": picks the highest valid patch+gke.N patch in the 1.X version\n- \"1.X.Y\": picks the highest valid gke.N patch in the 1.X.Y version\n- \"1.X.Y-gke.N\": picks an explicit Kubernetes version\n- \"-\": picks the Kubernetes master version", "type": "string" }, "projectId": { diff --git a/vendor/google.golang.org/api/container/v1beta1/container-gen.go b/vendor/google.golang.org/api/container/v1beta1/container-gen.go index 854b84f47..7bde17245 100644 --- a/vendor/google.golang.org/api/container/v1beta1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1beta1/container-gen.go @@ -496,6 +496,17 @@ type Cluster struct { // be upgraded over time; such upgrades are reflected // in // currentMasterVersion and currentNodeVersion. 
+ // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "","-": picks the default Kubernetes version InitialClusterVersion string `json:"initialClusterVersion,omitempty"` // InitialNodeCount: The number of nodes to create in this cluster. You @@ -563,6 +574,13 @@ type Cluster struct { // authorized networks feature. MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `json:"masterAuthorizedNetworksConfig,omitempty"` + // MasterIpv4CidrBlock: The IP prefix in CIDR notation to use for the + // hosted master network. + // This prefix will be used for assigning private IP addresses to + // the + // master or set of masters, as well as the ILB VIP. + MasterIpv4CidrBlock string `json:"masterIpv4CidrBlock,omitempty"` + // MonitoringService: The monitoring service the cluster should use to // write metrics. // Currently available options: @@ -589,7 +607,8 @@ type Cluster struct { // [network](/compute/docs/networks-and-firewalls#networks) to which // the // cluster is connected. If left unspecified, the `default` network - // will be used. + // will be used. On output this shows the network ID instead of + // the name. Network string `json:"network,omitempty"` // NetworkPolicy: Configuration options for the NetworkPolicy feature. @@ -628,6 +647,13 @@ type Cluster struct { // feature. PodSecurityPolicyConfig *PodSecurityPolicyConfig `json:"podSecurityPolicyConfig,omitempty"` + // PrivateCluster: If this is a private cluster setup. 
Private clusters + // are clusters that, by + // default have no external IP addresses on the nodes and where nodes + // and the + // master communicate over private IP addresses. + PrivateCluster bool `json:"privateCluster,omitempty"` + // ResourceLabels: The resource labels for the cluster to use to // annotate any related GCE // resources. @@ -679,7 +705,9 @@ type Cluster struct { // Subnetwork: The name of the Google Compute // Engine // [subnetwork](/compute/docs/subnetworks) to which the - // cluster is connected. + // cluster is connected. On output this shows the subnetwork ID instead + // of + // the name. Subnetwork string `json:"subnetwork,omitempty"` // Zone: [Output only] The name of the Google Compute @@ -750,9 +778,18 @@ type ClusterUpdate struct { // DesiredMasterVersion: The Kubernetes version to change the master to. // The only valid value is the - // latest supported version. Use "-" to have the server automatically - // select - // the latest version. + // latest supported version. + // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the default Kubernetes version DesiredMasterVersion string `json:"desiredMasterVersion,omitempty"` // DesiredMonitoringService: The monitoring service the cluster should @@ -781,8 +818,18 @@ type ClusterUpdate struct { // DesiredNodeVersion: The Kubernetes version to change the nodes to // (typically an - // upgrade). Use `-` to upgrade to the latest version supported by - // the server. + // upgrade). 
+ // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the Kubernetes master version DesiredNodeVersion string `json:"desiredNodeVersion,omitempty"` // DesiredPodSecurityPolicyConfig: The desired configuration options for @@ -3146,11 +3193,18 @@ type UpdateMasterRequest struct { // This field has been deprecated and replaced by the name field. ClusterId string `json:"clusterId,omitempty"` - // MasterVersion: The Kubernetes version to change the master to. The - // only valid value is the - // latest supported version. Use "-" to have the server automatically - // select - // the latest version. + // MasterVersion: The Kubernetes version to change the master to. + // + // Users may specify either explicit versions offered by + // Kubernetes Engine or version aliases, which have the following + // behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the default Kubernetes version MasterVersion string `json:"masterVersion,omitempty"` // Name: The name (project, location, cluster) of the cluster to @@ -3218,8 +3272,18 @@ type UpdateNodePoolRequest struct { // NodeVersion: The Kubernetes version to change the nodes to (typically // an - // upgrade). Use `-` to upgrade to the latest version supported by - // the server. + // upgrade). 
+ // + // Users may specify either explicit versions offered by Kubernetes + // Engine or + // version aliases, which have the following behavior: + // + // - "latest": picks the highest valid Kubernetes version + // - "1.X": picks the highest valid patch+gke.N patch in the 1.X + // version + // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version + // - "1.X.Y-gke.N": picks an explicit Kubernetes version + // - "-": picks the Kubernetes master version NodeVersion string `json:"nodeVersion,omitempty"` // ProjectId: Deprecated. The Google Developers Console [project ID or diff --git a/vendor/google.golang.org/api/content/v2/content-api.json b/vendor/google.golang.org/api/content/v2/content-api.json index 2211821a3..02d950460 100644 --- a/vendor/google.golang.org/api/content/v2/content-api.json +++ b/vendor/google.golang.org/api/content/v2/content-api.json @@ -15,7 +15,7 @@ "description": "Manages product items, inventory, and Merchant Center accounts for Google Shopping.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/shopping-content", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/1qOLk-oK_dMC3AStRBy-xt64OKE\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/7AU-xNxfu9N_gwoFbvlNkxATsnE\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -1029,6 +1029,339 @@ } } }, + "liasettings": { + "methods": { + "custombatch": { + "description": "Retrieves and/or updates the LIA settings of multiple accounts in a single request.", + "httpMethod": "POST", + "id": "content.liasettings.custombatch", + "parameters": { + "dryRun": { + "description": "Flag to run the request in dry-run mode.", + "location": "query", + "type": "boolean" + } + }, + "path": "liasettings/batch", + "request": { + "$ref": "LiasettingsCustomBatchRequest" + }, + "response": { + "$ref": "LiasettingsCustomBatchResponse" + }, + "scopes": [ + 
"https://www.googleapis.com/auth/content" + ] + }, + "get": { + "description": "Retrieves the LIA settings of the account.", + "httpMethod": "GET", + "id": "content.liasettings.get", + "parameterOrder": [ + "merchantId", + "accountId" + ], + "parameters": { + "accountId": { + "description": "The ID of the account for which to get or update LIA settings.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "merchantId": { + "description": "The ID of the managing account. If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}", + "response": { + "$ref": "LiaSettings" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "getaccessiblegmbaccounts": { + "description": "Retrieves the list of accessible Google My Business accounts.", + "httpMethod": "GET", + "id": "content.liasettings.getaccessiblegmbaccounts", + "parameterOrder": [ + "merchantId", + "accountId" + ], + "parameters": { + "accountId": { + "description": "The ID of the account for which to retrieve accessible Google My Business accounts.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "merchantId": { + "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}/accessiblegmbaccounts", + "response": { + "$ref": "LiasettingsGetAccessibleGmbAccountsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "list": { + "description": "Lists the LIA settings of the sub-accounts in your Merchant Center account.", + "httpMethod": "GET", + "id": "content.liasettings.list", + "parameterOrder": [ + "merchantId" + ], + "parameters": { + "maxResults": { + "description": "The maximum number of LIA settings to return in the response, used for paging.", + "format": "uint32", + "location": "query", + "type": "integer" + }, + "merchantId": { + "description": "The ID of the managing account. This must be a multi-client account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "pageToken": { + "description": "The token returned by the previous request.", + "location": "query", + "type": "string" + } + }, + "path": "{merchantId}/liasettings", + "response": { + "$ref": "LiasettingsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "patch": { + "description": "Updates the LIA settings of the account. 
This method supports patch semantics.", + "httpMethod": "PATCH", + "id": "content.liasettings.patch", + "parameterOrder": [ + "merchantId", + "accountId" + ], + "parameters": { + "accountId": { + "description": "The ID of the account for which to get or update LIA settings.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "dryRun": { + "description": "Flag to run the request in dry-run mode.", + "location": "query", + "type": "boolean" + }, + "merchantId": { + "description": "The ID of the managing account. If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}", + "request": { + "$ref": "LiaSettings" + }, + "response": { + "$ref": "LiaSettings" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "requestgmbaccess": { + "description": "Requests access to a specified Google My Business account.", + "httpMethod": "POST", + "id": "content.liasettings.requestgmbaccess", + "parameterOrder": [ + "merchantId", + "accountId" + ], + "parameters": { + "accountId": { + "description": "The ID of the account for which GMB access is requested.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "gmbEmail": { + "description": "The email of the Google My Business account.", + "location": "query", + "type": "string" + }, + "merchantId": { + "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}/requestgmbaccess", + "response": { + "$ref": "LiasettingsRequestGmbAccessResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "requestinventoryverification": { + "description": "Requests inventory validation for the specified country.", + "httpMethod": "POST", + "id": "content.liasettings.requestinventoryverification", + "parameterOrder": [ + "merchantId", + "accountId", + "country" + ], + "parameters": { + "accountId": { + "description": "The ID of the account that manages the order. This cannot be a multi-client account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "country": { + "description": "The country for which inventory validation is requested.", + "location": "path", + "required": true, + "type": "string" + }, + "merchantId": { + "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}/requestinventoryverification/{country}", + "response": { + "$ref": "LiasettingsRequestInventoryVerificationResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "setinventoryverificationcontact": { + "description": "Sets the inventory verification contract for the specified country.", + "httpMethod": "POST", + "id": "content.liasettings.setinventoryverificationcontact", + "parameterOrder": [ + "merchantId", + "accountId" + ], + "parameters": { + "accountId": { + "description": "The ID of the account that manages the order. This cannot be a multi-client account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "contactEmail": { + "description": "The email of the inventory verification contact.", + "location": "query", + "type": "string" + }, + "contactName": { + "description": "The name of the inventory verification contact.", + "location": "query", + "type": "string" + }, + "country": { + "description": "The country for which inventory verification is requested.", + "location": "query", + "type": "string" + }, + "language": { + "description": "The language for which inventory verification is requested.", + "location": "query", + "type": "string" + }, + "merchantId": { + "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}/setinventoryverificationcontact", + "response": { + "$ref": "LiasettingsSetInventoryVerificationContactResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + }, + "update": { + "description": "Updates the LIA settings of the account.", + "httpMethod": "PUT", + "id": "content.liasettings.update", + "parameterOrder": [ + "merchantId", + "accountId" + ], + "parameters": { + "accountId": { + "description": "The ID of the account for which to get or update LIA settings.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + }, + "dryRun": { + "description": "Flag to run the request in dry-run mode.", + "location": "query", + "type": "boolean" + }, + "merchantId": { + "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + "format": "uint64", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{merchantId}/liasettings/{accountId}", + "request": { + "$ref": "LiaSettings" + }, + "response": { + "$ref": "LiaSettings" + }, + "scopes": [ + "https://www.googleapis.com/auth/content" + ] + } + } + }, "orders": { "methods": { "acknowledge": { @@ -1788,7 +2121,7 @@ "type": "boolean" }, "merchantId": { - "description": "The ID of the POS provider.", + "description": "The ID of the POS or inventory data provider.", "format": "uint64", "location": "path", "required": true, @@ -1824,7 +2157,7 @@ ], "parameters": { "merchantId": { - "description": "The ID of the POS provider.", + "description": "The ID of the POS or inventory data provider.", "format": "uint64", "location": "path", "required": true, @@ -1867,7 +2200,7 @@ "type": "boolean" }, "merchantId": { - "description": "The ID of the POS provider.", + "description": "The ID of the POS or inventory data provider.", "format": "uint64", "location": "path", "required": true, @@ -1907,7 +2240,7 @@ "type": "boolean" }, "merchantId": { - "description": "The ID of the POS provider.", + "description": "The ID of the POS or inventory data provider.", "format": "uint64", "location": "path", "required": true, @@ -1942,7 +2275,7 @@ ], "parameters": { "merchantId": { - "description": "The ID of the POS provider.", + "description": "The ID of the POS or inventory data provider.", "format": "uint64", "location": "path", "required": true, @@ -1979,7 +2312,7 @@ "type": "boolean" }, "merchantId": { - "description": "The ID of the POS provider.", + "description": "The ID of the POS or inventory data provider.", "format": "uint64", "location": "path", "required": true, @@ -2497,7 +2830,7 @@ } } }, - "revision": "20180305", + "revision": "20180316", "rootUrl": 
"https://www.googleapis.com/", "schemas": { "Account": { @@ -3795,6 +4128,47 @@ }, "type": "object" }, + "GmbAccounts": { + "id": "GmbAccounts", + "properties": { + "accountId": { + "description": "The ID of the account.", + "format": "uint64", + "type": "string" + }, + "gmbAccounts": { + "description": "A list of GMB accounts which are available to the merchant.", + "items": { + "$ref": "GmbAccountsGmbAccount" + }, + "type": "array" + } + }, + "type": "object" + }, + "GmbAccountsGmbAccount": { + "id": "GmbAccountsGmbAccount", + "properties": { + "email": { + "description": "The email which identifies the GMB account.", + "type": "string" + }, + "listingCount": { + "description": "Number of listings under this account.", + "format": "uint64", + "type": "string" + }, + "name": { + "description": "The name of the GMB account.", + "type": "string" + }, + "type": { + "description": "The type of the GMB account (User or Business).", + "type": "string" + } + }, + "type": "object" + }, "Headers": { "description": "A non-empty list of row or column headers for a table. Exactly one of prices, weights, numItems, postalCodeGroupNames, or locations must be set.", "id": "Headers", @@ -4109,6 +4483,294 @@ }, "type": "object" }, + "LiaAboutPageSettings": { + "id": "LiaAboutPageSettings", + "properties": { + "status": { + "description": "The status of the verification process for the About page.", + "type": "string" + }, + "url": { + "description": "The URL for the About page.", + "type": "string" + } + }, + "type": "object" + }, + "LiaCountrySettings": { + "id": "LiaCountrySettings", + "properties": { + "about": { + "$ref": "LiaAboutPageSettings", + "description": "The settings for the About page." + }, + "country": { + "annotations": { + "required": [ + "content.liasettings.update" + ] + }, + "description": "CLDR country code (e.g. 
\"US\").", + "type": "string" + }, + "hostedLocalStorefrontActive": { + "description": "The status of the \"Merchant hosted local storefront\" feature.", + "type": "boolean" + }, + "inventory": { + "$ref": "LiaInventorySettings", + "description": "LIA inventory verification settings." + }, + "onDisplayToOrder": { + "$ref": "LiaOnDisplayToOrderSettings", + "description": "LIA \"On Display To Order\" settings." + }, + "storePickupActive": { + "description": "The status of the \"Store pickup\" feature.", + "type": "boolean" + } + }, + "type": "object" + }, + "LiaInventorySettings": { + "id": "LiaInventorySettings", + "properties": { + "inventoryVerificationContactEmail": { + "description": "The email of the contact for the inventory verification process.", + "type": "string" + }, + "inventoryVerificationContactName": { + "description": "The name of the contact for the inventory verification process.", + "type": "string" + }, + "inventoryVerificationContactStatus": { + "description": "The status of the verification contact.", + "type": "string" + }, + "status": { + "description": "The status of the inventory verification process.", + "type": "string" + } + }, + "type": "object" + }, + "LiaOnDisplayToOrderSettings": { + "id": "LiaOnDisplayToOrderSettings", + "properties": { + "shippingCostPolicyUrl": { + "description": "Shipping cost and policy URL.", + "type": "string" + }, + "status": { + "description": "The status of the ?On display to order? feature.", + "type": "string" + } + }, + "type": "object" + }, + "LiaSettings": { + "id": "LiaSettings", + "properties": { + "accountId": { + "description": "The ID of the account to which these LIA settings belong. 
Ignored upon update, always present in get request responses.", + "format": "uint64", + "type": "string" + }, + "countrySettings": { + "description": "The LIA settings for each country.", + "items": { + "$ref": "LiaCountrySettings" + }, + "type": "array" + }, + "kind": { + "default": "content#liaSettings", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liaSettings\".", + "type": "string" + } + }, + "type": "object" + }, + "LiasettingsCustomBatchRequest": { + "id": "LiasettingsCustomBatchRequest", + "properties": { + "entries": { + "description": "The request entries to be processed in the batch.", + "items": { + "$ref": "LiasettingsCustomBatchRequestEntry" + }, + "type": "array" + } + }, + "type": "object" + }, + "LiasettingsCustomBatchRequestEntry": { + "id": "LiasettingsCustomBatchRequestEntry", + "properties": { + "accountId": { + "description": "The ID of the account for which to get/update account shipping settings.", + "format": "uint64", + "type": "string" + }, + "batchId": { + "description": "An entry ID, unique within the batch request.", + "format": "uint32", + "type": "integer" + }, + "contactEmail": { + "description": "Inventory validation contact email. Required only for SetInventoryValidationContact.", + "type": "string" + }, + "contactName": { + "description": "Inventory validation contact name. Required only for SetInventoryValidationContact.", + "type": "string" + }, + "country": { + "description": "The country code. Required only for RequestInventoryVerification.", + "type": "string" + }, + "gmbEmail": { + "description": "The GMB account. Required only for RequestGmbAccess.", + "type": "string" + }, + "liaSettings": { + "$ref": "LiaSettings", + "description": "The account Lia settings to update. Only defined if the method is update." 
+ }, + "merchantId": { + "description": "The ID of the managing account.", + "format": "uint64", + "type": "string" + }, + "method": { + "type": "string" + } + }, + "type": "object" + }, + "LiasettingsCustomBatchResponse": { + "id": "LiasettingsCustomBatchResponse", + "properties": { + "entries": { + "description": "The result of the execution of the batch requests.", + "items": { + "$ref": "LiasettingsCustomBatchResponseEntry" + }, + "type": "array" + }, + "kind": { + "default": "content#liasettingsCustomBatchResponse", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liasettingsCustomBatchResponse\".", + "type": "string" + } + }, + "type": "object" + }, + "LiasettingsCustomBatchResponseEntry": { + "id": "LiasettingsCustomBatchResponseEntry", + "properties": { + "batchId": { + "description": "The ID of the request entry to which this entry responds.", + "format": "uint32", + "type": "integer" + }, + "errors": { + "$ref": "Errors", + "description": "A list of errors defined if, and only if, the request failed." + }, + "gmbAccounts": { + "$ref": "GmbAccounts", + "description": "The the list of accessible GMB accounts." + }, + "kind": { + "default": "content#liasettingsCustomBatchResponseEntry", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liasettingsCustomBatchResponseEntry\".", + "type": "string" + }, + "liaSettings": { + "$ref": "LiaSettings", + "description": "The retrieved or updated Lia settings." 
+ } + }, + "type": "object" + }, + "LiasettingsGetAccessibleGmbAccountsResponse": { + "id": "LiasettingsGetAccessibleGmbAccountsResponse", + "properties": { + "accountId": { + "description": "The ID of the account.", + "format": "uint64", + "type": "string" + }, + "gmbAccounts": { + "description": "A list of GMB accounts which are available to the merchant.", + "items": { + "$ref": "GmbAccountsGmbAccount" + }, + "type": "array" + }, + "kind": { + "default": "content#liasettingsGetAccessibleGmbAccountsResponse", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liasettingsGetAccessibleGmbAccountsResponse\".", + "type": "string" + } + }, + "type": "object" + }, + "LiasettingsListResponse": { + "id": "LiasettingsListResponse", + "properties": { + "kind": { + "default": "content#liasettingsListResponse", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liasettingsListResponse\".", + "type": "string" + }, + "nextPageToken": { + "description": "The token for the retrieval of the next page of LIA settings.", + "type": "string" + }, + "resources": { + "items": { + "$ref": "LiaSettings" + }, + "type": "array" + } + }, + "type": "object" + }, + "LiasettingsRequestGmbAccessResponse": { + "id": "LiasettingsRequestGmbAccessResponse", + "properties": { + "kind": { + "default": "content#liasettingsRequestGmbAccessResponse", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liasettingsRequestGmbAccessResponse\".", + "type": "string" + } + }, + "type": "object" + }, + "LiasettingsRequestInventoryVerificationResponse": { + "id": "LiasettingsRequestInventoryVerificationResponse", + "properties": { + "kind": { + "default": "content#liasettingsRequestInventoryVerificationResponse", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"content#liasettingsRequestInventoryVerificationResponse\".", + "type": "string" + } + }, + "type": "object" + }, + "LiasettingsSetInventoryVerificationContactResponse": { + "id": "LiasettingsSetInventoryVerificationContactResponse", + "properties": { + "kind": { + "default": "content#liasettingsSetInventoryVerificationContactResponse", + "description": "Identifies what kind of resource this is. Value: the fixed string \"content#liasettingsSetInventoryVerificationContactResponse\".", + "type": "string" + } + }, + "type": "object" + }, "LocationIdSet": { "id": "LocationIdSet", "properties": { @@ -4316,7 +4978,7 @@ "id": "OrderCustomer", "properties": { "email": { - "description": "Email address of the customer.", + "description": "Email address that should be used for order related communications. In certain cases this might not be a real users email, but a proxy email.", "type": "string" }, "explicitMarketingPreference": { @@ -6052,6 +6714,11 @@ "id": "PosInventory", "properties": { "contentLanguage": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The two-letter ISO 639-1 language code for the item.", "type": "string" }, @@ -6060,6 +6727,11 @@ "type": "string" }, "itemId": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "A unique identifier for the item.", "type": "string" }, @@ -6070,22 +6742,47 @@ }, "price": { "$ref": "Price", + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The current price of the item." }, "quantity": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The available quantity of the item.", "format": "int64", "type": "string" }, "storeCode": { - "description": "The identifier of the merchant's store.", + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, + "description": "The identifier of the merchant's store. 
Either a storeCode inserted via the API or the code of the store in Google My Business.", "type": "string" }, "targetCountry": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The CLDR territory code for the item.", "type": "string" }, "timestamp": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The inventory timestamp, in ISO 8601 format.", "type": "string" } @@ -6096,6 +6793,11 @@ "id": "PosInventoryRequest", "properties": { "contentLanguage": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The two-letter ISO 639-1 language code for the item.", "type": "string" }, @@ -6104,27 +6806,57 @@ "type": "string" }, "itemId": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "A unique identifier for the item.", "type": "string" }, "price": { "$ref": "Price", + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The current price of the item." }, "quantity": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The available quantity of the item.", "format": "int64", "type": "string" }, "storeCode": { - "description": "The identifier of the merchant's store.", + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, + "description": "The identifier of the merchant's store. 
Either a storeCode inserted via the API or the code of the store in Google My Business.", "type": "string" }, "targetCountry": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The CLDR territory code for the item.", "type": "string" }, "timestamp": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The inventory timestamp, in ISO 8601 format.", "type": "string" } @@ -6135,6 +6867,11 @@ "id": "PosInventoryResponse", "properties": { "contentLanguage": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The two-letter ISO 639-1 language code for the item.", "type": "string" }, @@ -6143,6 +6880,11 @@ "type": "string" }, "itemId": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "A unique identifier for the item.", "type": "string" }, @@ -6153,22 +6895,47 @@ }, "price": { "$ref": "Price", + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The current price of the item." }, "quantity": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The available quantity of the item.", "format": "int64", "type": "string" }, "storeCode": { - "description": "The identifier of the merchant's store.", + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, + "description": "The identifier of the merchant's store. 
Either a storeCode inserted via the API or the code of the store in Google My Business.", "type": "string" }, "targetCountry": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The CLDR territory code for the item.", "type": "string" }, "timestamp": { + "annotations": { + "required": [ + "content.pos.inventory" + ] + }, "description": "The inventory timestamp, in ISO 8601 format.", "type": "string" } @@ -6197,6 +6964,11 @@ "id": "PosSale", "properties": { "contentLanguage": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The two-letter ISO 639-1 language code for the item.", "type": "string" }, @@ -6205,6 +6977,11 @@ "type": "string" }, "itemId": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "A unique identifier for the item.", "type": "string" }, @@ -6215,9 +6992,19 @@ }, "price": { "$ref": "Price", + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The price of the item." }, "quantity": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The relative change of the available quantity. Negative for items sold.", "format": "int64", "type": "string" @@ -6227,14 +7014,29 @@ "type": "string" }, "storeCode": { - "description": "The identifier of the merchant's store.", + "annotations": { + "required": [ + "content.pos.sale" + ] + }, + "description": "The identifier of the merchant's store. 
Either a storeCode inserted via the API or the code of the store in Google My Business.", "type": "string" }, "targetCountry": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The CLDR territory code for the item.", "type": "string" }, "timestamp": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The inventory timestamp, in ISO 8601 format.", "type": "string" } @@ -6245,6 +7047,11 @@ "id": "PosSaleRequest", "properties": { "contentLanguage": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The two-letter ISO 639-1 language code for the item.", "type": "string" }, @@ -6253,14 +7060,29 @@ "type": "string" }, "itemId": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "A unique identifier for the item.", "type": "string" }, "price": { "$ref": "Price", + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The price of the item." }, "quantity": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The relative change of the available quantity. Negative for items sold.", "format": "int64", "type": "string" @@ -6270,14 +7092,29 @@ "type": "string" }, "storeCode": { - "description": "The identifier of the merchant's store.", + "annotations": { + "required": [ + "content.pos.sale" + ] + }, + "description": "The identifier of the merchant's store. 
Either a storeCode inserted via the API or the code of the store in Google My Business.", "type": "string" }, "targetCountry": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The CLDR territory code for the item.", "type": "string" }, "timestamp": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The inventory timestamp, in ISO 8601 format.", "type": "string" } @@ -6288,6 +7125,11 @@ "id": "PosSaleResponse", "properties": { "contentLanguage": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The two-letter ISO 639-1 language code for the item.", "type": "string" }, @@ -6296,6 +7138,11 @@ "type": "string" }, "itemId": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "A unique identifier for the item.", "type": "string" }, @@ -6306,9 +7153,19 @@ }, "price": { "$ref": "Price", + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The price of the item." }, "quantity": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The relative change of the available quantity. Negative for items sold.", "format": "int64", "type": "string" @@ -6318,14 +7175,29 @@ "type": "string" }, "storeCode": { - "description": "The identifier of the merchant's store.", + "annotations": { + "required": [ + "content.pos.sale" + ] + }, + "description": "The identifier of the merchant's store. 
Either a storeCode inserted via the API or the code of the store in Google My Business.", "type": "string" }, "targetCountry": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The CLDR territory code for the item.", "type": "string" }, "timestamp": { + "annotations": { + "required": [ + "content.pos.sale" + ] + }, "description": "The inventory timestamp, in ISO 8601 format.", "type": "string" } @@ -6342,10 +7214,20 @@ "type": "string" }, "storeAddress": { + "annotations": { + "required": [ + "content.pos.insert" + ] + }, "description": "The street address of the store.", "type": "string" }, "storeCode": { + "annotations": { + "required": [ + "content.pos.insert" + ] + }, "description": "A store identifier that is unique for the given merchant.", "type": "string" } diff --git a/vendor/google.golang.org/api/content/v2/content-gen.go b/vendor/google.golang.org/api/content/v2/content-gen.go index e813669e0..9f7ed5908 100644 --- a/vendor/google.golang.org/api/content/v2/content-gen.go +++ b/vendor/google.golang.org/api/content/v2/content-gen.go @@ -62,6 +62,7 @@ func New(client *http.Client) (*APIService, error) { s.Datafeeds = NewDatafeedsService(s) s.Datafeedstatuses = NewDatafeedstatusesService(s) s.Inventory = NewInventoryService(s) + s.Liasettings = NewLiasettingsService(s) s.Orders = NewOrdersService(s) s.Pos = NewPosService(s) s.Products = NewProductsService(s) @@ -87,6 +88,8 @@ type APIService struct { Inventory *InventoryService + Liasettings *LiasettingsService + Orders *OrdersService Pos *PosService @@ -159,6 +162,15 @@ type InventoryService struct { s *APIService } +func NewLiasettingsService(s *APIService) *LiasettingsService { + rs := &LiasettingsService{s: s} + return rs +} + +type LiasettingsService struct { + s *APIService +} + func NewOrdersService(s *APIService) *OrdersService { rs := &OrdersService{s: s} return rs @@ -2252,6 +2264,73 @@ func (s *Errors) MarshalJSON() ([]byte, error) { return 
gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GmbAccounts struct { + // AccountId: The ID of the account. + AccountId uint64 `json:"accountId,omitempty,string"` + + // GmbAccounts: A list of GMB accounts which are available to the + // merchant. + GmbAccounts []*GmbAccountsGmbAccount `json:"gmbAccounts,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccountId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccountId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GmbAccounts) MarshalJSON() ([]byte, error) { + type NoMethod GmbAccounts + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GmbAccountsGmbAccount struct { + // Email: The email which identifies the GMB account. + Email string `json:"email,omitempty"` + + // ListingCount: Number of listings under this account. + ListingCount uint64 `json:"listingCount,omitempty,string"` + + // Name: The name of the GMB account. + Name string `json:"name,omitempty"` + + // Type: The type of the GMB account (User or Business). + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Email") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Email") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GmbAccountsGmbAccount) MarshalJSON() ([]byte, error) { + type NoMethod GmbAccountsGmbAccount + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Headers: A non-empty list of row or column headers for a table. // Exactly one of prices, weights, numItems, postalCodeGroupNames, or // locations must be set. @@ -2778,6 +2857,521 @@ func (s *InventorySetResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type LiaAboutPageSettings struct { + // Status: The status of the verification process for the About page. + Status string `json:"status,omitempty"` + + // Url: The URL for the About page. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Status") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Status") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiaAboutPageSettings) MarshalJSON() ([]byte, error) { + type NoMethod LiaAboutPageSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiaCountrySettings struct { + // About: The settings for the About page. + About *LiaAboutPageSettings `json:"about,omitempty"` + + // Country: CLDR country code (e.g. "US"). + Country string `json:"country,omitempty"` + + // HostedLocalStorefrontActive: The status of the "Merchant hosted local + // storefront" feature. + HostedLocalStorefrontActive bool `json:"hostedLocalStorefrontActive,omitempty"` + + // Inventory: LIA inventory verification settings. + Inventory *LiaInventorySettings `json:"inventory,omitempty"` + + // OnDisplayToOrder: LIA "On Display To Order" settings. + OnDisplayToOrder *LiaOnDisplayToOrderSettings `json:"onDisplayToOrder,omitempty"` + + // StorePickupActive: The status of the "Store pickup" feature. + StorePickupActive bool `json:"storePickupActive,omitempty"` + + // ForceSendFields is a list of field names (e.g. "About") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"About") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiaCountrySettings) MarshalJSON() ([]byte, error) { + type NoMethod LiaCountrySettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiaInventorySettings struct { + // InventoryVerificationContactEmail: The email of the contact for the + // inventory verification process. + InventoryVerificationContactEmail string `json:"inventoryVerificationContactEmail,omitempty"` + + // InventoryVerificationContactName: The name of the contact for the + // inventory verification process. + InventoryVerificationContactName string `json:"inventoryVerificationContactName,omitempty"` + + // InventoryVerificationContactStatus: The status of the verification + // contact. + InventoryVerificationContactStatus string `json:"inventoryVerificationContactStatus,omitempty"` + + // Status: The status of the inventory verification process. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InventoryVerificationContactEmail") to unconditionally include in + // API requests. By default, fields with empty values are omitted from + // API requests. However, any non-pointer, non-interface field appearing + // in ForceSendFields will be sent to the server regardless of whether + // the field is empty or not. This may be used to include empty fields + // in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "InventoryVerificationContactEmail") to include in API requests with + // the JSON null value. 
By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiaInventorySettings) MarshalJSON() ([]byte, error) { + type NoMethod LiaInventorySettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiaOnDisplayToOrderSettings struct { + // ShippingCostPolicyUrl: Shipping cost and policy URL. + ShippingCostPolicyUrl string `json:"shippingCostPolicyUrl,omitempty"` + + // Status: The status of the ?On display to order? feature. + Status string `json:"status,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ShippingCostPolicyUrl") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ShippingCostPolicyUrl") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *LiaOnDisplayToOrderSettings) MarshalJSON() ([]byte, error) { + type NoMethod LiaOnDisplayToOrderSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiaSettings struct { + // AccountId: The ID of the account to which these LIA settings belong. + // Ignored upon update, always present in get request responses. + AccountId uint64 `json:"accountId,omitempty,string"` + + // CountrySettings: The LIA settings for each country. + CountrySettings []*LiaCountrySettings `json:"countrySettings,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liaSettings". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccountId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccountId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *LiaSettings) MarshalJSON() ([]byte, error) { + type NoMethod LiaSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsCustomBatchRequest struct { + // Entries: The request entries to be processed in the batch. + Entries []*LiasettingsCustomBatchRequestEntry `json:"entries,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entries") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entries") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsCustomBatchRequest) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsCustomBatchRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsCustomBatchRequestEntry struct { + // AccountId: The ID of the account for which to get/update account + // shipping settings. + AccountId uint64 `json:"accountId,omitempty,string"` + + // BatchId: An entry ID, unique within the batch request. + BatchId int64 `json:"batchId,omitempty"` + + // ContactEmail: Inventory validation contact email. Required only for + // SetInventoryValidationContact. 
+ ContactEmail string `json:"contactEmail,omitempty"` + + // ContactName: Inventory validation contact name. Required only for + // SetInventoryValidationContact. + ContactName string `json:"contactName,omitempty"` + + // Country: The country code. Required only for + // RequestInventoryVerification. + Country string `json:"country,omitempty"` + + // GmbEmail: The GMB account. Required only for RequestGmbAccess. + GmbEmail string `json:"gmbEmail,omitempty"` + + // LiaSettings: The account Lia settings to update. Only defined if the + // method is update. + LiaSettings *LiaSettings `json:"liaSettings,omitempty"` + + // MerchantId: The ID of the managing account. + MerchantId uint64 `json:"merchantId,omitempty,string"` + + Method string `json:"method,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccountId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccountId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *LiasettingsCustomBatchRequestEntry) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsCustomBatchRequestEntry + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsCustomBatchResponse struct { + // Entries: The result of the execution of the batch requests. + Entries []*LiasettingsCustomBatchResponseEntry `json:"entries,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsCustomBatchResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Entries") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entries") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsCustomBatchResponse) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsCustomBatchResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsCustomBatchResponseEntry struct { + // BatchId: The ID of the request entry to which this entry responds. 
+ BatchId int64 `json:"batchId,omitempty"` + + // Errors: A list of errors defined if, and only if, the request failed. + Errors *Errors `json:"errors,omitempty"` + + // GmbAccounts: The the list of accessible GMB accounts. + GmbAccounts *GmbAccounts `json:"gmbAccounts,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsCustomBatchResponseEntry". + Kind string `json:"kind,omitempty"` + + // LiaSettings: The retrieved or updated Lia settings. + LiaSettings *LiaSettings `json:"liaSettings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BatchId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BatchId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsCustomBatchResponseEntry) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsCustomBatchResponseEntry + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsGetAccessibleGmbAccountsResponse struct { + // AccountId: The ID of the account. + AccountId uint64 `json:"accountId,omitempty,string"` + + // GmbAccounts: A list of GMB accounts which are available to the + // merchant. 
+ GmbAccounts []*GmbAccountsGmbAccount `json:"gmbAccounts,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsGetAccessibleGmbAccountsResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccountId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccountId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsGetAccessibleGmbAccountsResponse) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsGetAccessibleGmbAccountsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsListResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsListResponse". + Kind string `json:"kind,omitempty"` + + // NextPageToken: The token for the retrieval of the next page of LIA + // settings. 
+ NextPageToken string `json:"nextPageToken,omitempty"` + + Resources []*LiaSettings `json:"resources,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsListResponse) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsListResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsRequestGmbAccessResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsRequestGmbAccessResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsRequestGmbAccessResponse) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsRequestGmbAccessResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsRequestInventoryVerificationResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsRequestInventoryVerificationResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LiasettingsRequestInventoryVerificationResponse) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsRequestInventoryVerificationResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LiasettingsSetInventoryVerificationContactResponse struct { + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "content#liasettingsSetInventoryVerificationContactResponse". + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *LiasettingsSetInventoryVerificationContactResponse) MarshalJSON() ([]byte, error) { + type NoMethod LiasettingsSetInventoryVerificationContactResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type LocationIdSet struct { // LocationIds: A non-empty list of location IDs. They must all be of // the same location type (e.g., state). @@ -3047,7 +3641,9 @@ func (s *OrderCancellation) MarshalJSON() ([]byte, error) { } type OrderCustomer struct { - // Email: Email address of the customer. + // Email: Email address that should be used for order related + // communications. In certain cases this might not be a real users + // email, but a proxy email. Email string `json:"email,omitempty"` // ExplicitMarketingPreference: If set, this indicates the user @@ -5864,7 +6460,8 @@ type PosInventory struct { // Quantity: The available quantity of the item. Quantity int64 `json:"quantity,omitempty,string"` - // StoreCode: The identifier of the merchant's store. + // StoreCode: The identifier of the merchant's store. Either a storeCode + // inserted via the API or the code of the store in Google My Business. StoreCode string `json:"storeCode,omitempty"` // TargetCountry: The CLDR territory code for the item. @@ -5913,7 +6510,8 @@ type PosInventoryRequest struct { // Quantity: The available quantity of the item. Quantity int64 `json:"quantity,omitempty,string"` - // StoreCode: The identifier of the merchant's store. + // StoreCode: The identifier of the merchant's store. Either a storeCode + // inserted via the API or the code of the store in Google My Business. StoreCode string `json:"storeCode,omitempty"` // TargetCountry: The CLDR territory code for the item. @@ -5966,7 +6564,8 @@ type PosInventoryResponse struct { // Quantity: The available quantity of the item. Quantity int64 `json:"quantity,omitempty,string"` - // StoreCode: The identifier of the merchant's store. 
+ // StoreCode: The identifier of the merchant's store. Either a storeCode + // inserted via the API or the code of the store in Google My Business. StoreCode string `json:"storeCode,omitempty"` // TargetCountry: The CLDR territory code for the item. @@ -6063,7 +6662,8 @@ type PosSale struct { // SaleId: A unique ID to group items from the same sale event. SaleId string `json:"saleId,omitempty"` - // StoreCode: The identifier of the merchant's store. + // StoreCode: The identifier of the merchant's store. Either a storeCode + // inserted via the API or the code of the store in Google My Business. StoreCode string `json:"storeCode,omitempty"` // TargetCountry: The CLDR territory code for the item. @@ -6116,7 +6716,8 @@ type PosSaleRequest struct { // SaleId: A unique ID to group items from the same sale event. SaleId string `json:"saleId,omitempty"` - // StoreCode: The identifier of the merchant's store. + // StoreCode: The identifier of the merchant's store. Either a storeCode + // inserted via the API or the code of the store in Google My Business. StoreCode string `json:"storeCode,omitempty"` // TargetCountry: The CLDR territory code for the item. @@ -6173,7 +6774,8 @@ type PosSaleResponse struct { // SaleId: A unique ID to group items from the same sale event. SaleId string `json:"saleId,omitempty"` - // StoreCode: The identifier of the merchant's store. + // StoreCode: The identifier of the merchant's store. Either a storeCode + // inserted via the API or the code of the store in Google My Business. StoreCode string `json:"storeCode,omitempty"` // TargetCountry: The CLDR territory code for the item. 
@@ -12819,6 +13421,1422 @@ func (c *InventorySetCall) Do(opts ...googleapi.CallOption) (*InventorySetRespon } +// method id "content.liasettings.custombatch": + +type LiasettingsCustombatchCall struct { + s *APIService + liasettingscustombatchrequest *LiasettingsCustomBatchRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Custombatch: Retrieves and/or updates the LIA settings of multiple +// accounts in a single request. +func (r *LiasettingsService) Custombatch(liasettingscustombatchrequest *LiasettingsCustomBatchRequest) *LiasettingsCustombatchCall { + c := &LiasettingsCustombatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.liasettingscustombatchrequest = liasettingscustombatchrequest + return c +} + +// DryRun sets the optional parameter "dryRun": Flag to run the request +// in dry-run mode. +func (c *LiasettingsCustombatchCall) DryRun(dryRun bool) *LiasettingsCustombatchCall { + c.urlParams_.Set("dryRun", fmt.Sprint(dryRun)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsCustombatchCall) Fields(s ...googleapi.Field) *LiasettingsCustombatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiasettingsCustombatchCall) Context(ctx context.Context) *LiasettingsCustombatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *LiasettingsCustombatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsCustombatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.liasettingscustombatchrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "liasettings/batch") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.custombatch" call. +// Exactly one of *LiasettingsCustomBatchResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *LiasettingsCustomBatchResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LiasettingsCustombatchCall) Do(opts ...googleapi.CallOption) (*LiasettingsCustomBatchResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiasettingsCustomBatchResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves and/or updates the LIA settings of multiple accounts in a single request.", + // "httpMethod": "POST", + // "id": "content.liasettings.custombatch", + // "parameters": { + // "dryRun": { + // "description": "Flag to run the request in dry-run mode.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "liasettings/batch", + // "request": { + // "$ref": "LiasettingsCustomBatchRequest" + // }, + // "response": { + // "$ref": "LiasettingsCustomBatchResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.get": + +type LiasettingsGetCall struct { + s *APIService + merchantId uint64 + accountId uint64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the LIA settings of the account. +func (r *LiasettingsService) Get(merchantId uint64, accountId uint64) *LiasettingsGetCall { + c := &LiasettingsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *LiasettingsGetCall) Fields(s ...googleapi.Field) *LiasettingsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LiasettingsGetCall) IfNoneMatch(entityTag string) *LiasettingsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiasettingsGetCall) Context(ctx context.Context) *LiasettingsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.get" call. 
+// Exactly one of *LiaSettings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LiaSettings.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LiasettingsGetCall) Do(opts ...googleapi.CallOption) (*LiaSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiaSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the LIA settings of the account.", + // "httpMethod": "GET", + // "id": "content.liasettings.get", + // "parameterOrder": [ + // "merchantId", + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account for which to get or update LIA settings.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}", + // "response": { + // "$ref": "LiaSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.getaccessiblegmbaccounts": + +type LiasettingsGetaccessiblegmbaccountsCall struct { + s *APIService + merchantId uint64 + accountId uint64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Getaccessiblegmbaccounts: Retrieves the list of accessible Google My +// Business accounts. +func (r *LiasettingsService) Getaccessiblegmbaccounts(merchantId uint64, accountId uint64) *LiasettingsGetaccessiblegmbaccountsCall { + c := &LiasettingsGetaccessiblegmbaccountsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsGetaccessiblegmbaccountsCall) Fields(s ...googleapi.Field) *LiasettingsGetaccessiblegmbaccountsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *LiasettingsGetaccessiblegmbaccountsCall) IfNoneMatch(entityTag string) *LiasettingsGetaccessiblegmbaccountsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiasettingsGetaccessiblegmbaccountsCall) Context(ctx context.Context) *LiasettingsGetaccessiblegmbaccountsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsGetaccessiblegmbaccountsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsGetaccessiblegmbaccountsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}/accessiblegmbaccounts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.getaccessiblegmbaccounts" call. +// Exactly one of *LiasettingsGetAccessibleGmbAccountsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *LiasettingsGetAccessibleGmbAccountsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *LiasettingsGetaccessiblegmbaccountsCall) Do(opts ...googleapi.CallOption) (*LiasettingsGetAccessibleGmbAccountsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiasettingsGetAccessibleGmbAccountsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of accessible Google My Business accounts.", + // "httpMethod": "GET", + // "id": "content.liasettings.getaccessiblegmbaccounts", + // "parameterOrder": [ + // "merchantId", + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account for which to retrieve accessible Google My Business accounts.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}/accessiblegmbaccounts", + // "response": { + // "$ref": "LiasettingsGetAccessibleGmbAccountsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.list": + +type LiasettingsListCall struct { + s *APIService + merchantId uint64 + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists the LIA settings of the sub-accounts in your Merchant +// Center account. +func (r *LiasettingsService) List(merchantId uint64) *LiasettingsListCall { + c := &LiasettingsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of LIA settings to return in the response, used for paging. +func (c *LiasettingsListCall) MaxResults(maxResults int64) *LiasettingsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": The token returned +// by the previous request. +func (c *LiasettingsListCall) PageToken(pageToken string) *LiasettingsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *LiasettingsListCall) Fields(s ...googleapi.Field) *LiasettingsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *LiasettingsListCall) IfNoneMatch(entityTag string) *LiasettingsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiasettingsListCall) Context(ctx context.Context) *LiasettingsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.list" call. +// Exactly one of *LiasettingsListResponse or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *LiasettingsListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *LiasettingsListCall) Do(opts ...googleapi.CallOption) (*LiasettingsListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiasettingsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the LIA settings of the sub-accounts in your Merchant Center account.", + // "httpMethod": "GET", + // "id": "content.liasettings.list", + // "parameterOrder": [ + // "merchantId" + // ], + // "parameters": { + // "maxResults": { + // "description": "The maximum number of LIA settings to return in the response, used for paging.", + // "format": "uint32", + // "location": "query", + // "type": "integer" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
This must be a multi-client account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageToken": { + // "description": "The token returned by the previous request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings", + // "response": { + // "$ref": "LiasettingsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *LiasettingsListCall) Pages(ctx context.Context, f func(*LiasettingsListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "content.liasettings.patch": + +type LiasettingsPatchCall struct { + s *APIService + merchantId uint64 + accountId uint64 + liasettings *LiaSettings + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the LIA settings of the account. This method supports +// patch semantics. +func (r *LiasettingsService) Patch(merchantId uint64, accountId uint64, liasettings *LiaSettings) *LiasettingsPatchCall { + c := &LiasettingsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + c.liasettings = liasettings + return c +} + +// DryRun sets the optional parameter "dryRun": Flag to run the request +// in dry-run mode. 
+func (c *LiasettingsPatchCall) DryRun(dryRun bool) *LiasettingsPatchCall { + c.urlParams_.Set("dryRun", fmt.Sprint(dryRun)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsPatchCall) Fields(s ...googleapi.Field) *LiasettingsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiasettingsPatchCall) Context(ctx context.Context) *LiasettingsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.liasettings) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.patch" call. +// Exactly one of *LiaSettings or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *LiaSettings.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LiasettingsPatchCall) Do(opts ...googleapi.CallOption) (*LiaSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiaSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the LIA settings of the account. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "content.liasettings.patch", + // "parameterOrder": [ + // "merchantId", + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account for which to get or update LIA settings.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "dryRun": { + // "description": "Flag to run the request in dry-run mode.", + // "location": "query", + // "type": "boolean" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}", + // "request": { + // "$ref": "LiaSettings" + // }, + // "response": { + // "$ref": "LiaSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.requestgmbaccess": + +type LiasettingsRequestgmbaccessCall struct { + s *APIService + merchantId uint64 + accountId uint64 + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Requestgmbaccess: Requests access to a specified Google My Business +// account. +func (r *LiasettingsService) Requestgmbaccess(merchantId uint64, accountId uint64) *LiasettingsRequestgmbaccessCall { + c := &LiasettingsRequestgmbaccessCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + return c +} + +// GmbEmail sets the optional parameter "gmbEmail": The email of the +// Google My Business account. +func (c *LiasettingsRequestgmbaccessCall) GmbEmail(gmbEmail string) *LiasettingsRequestgmbaccessCall { + c.urlParams_.Set("gmbEmail", gmbEmail) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsRequestgmbaccessCall) Fields(s ...googleapi.Field) *LiasettingsRequestgmbaccessCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *LiasettingsRequestgmbaccessCall) Context(ctx context.Context) *LiasettingsRequestgmbaccessCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsRequestgmbaccessCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsRequestgmbaccessCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}/requestgmbaccess") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.requestgmbaccess" call. +// Exactly one of *LiasettingsRequestGmbAccessResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *LiasettingsRequestGmbAccessResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *LiasettingsRequestgmbaccessCall) Do(opts ...googleapi.CallOption) (*LiasettingsRequestGmbAccessResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiasettingsRequestGmbAccessResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Requests access to a specified Google My Business account.", + // "httpMethod": "POST", + // "id": "content.liasettings.requestgmbaccess", + // "parameterOrder": [ + // "merchantId", + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account for which GMB access is requested.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "gmbEmail": { + // "description": "The email of the Google My Business account.", + // "location": "query", + // "type": "string" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}/requestgmbaccess", + // "response": { + // "$ref": "LiasettingsRequestGmbAccessResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.requestinventoryverification": + +type LiasettingsRequestinventoryverificationCall struct { + s *APIService + merchantId uint64 + accountId uint64 + country string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Requestinventoryverification: Requests inventory validation for the +// specified country. +func (r *LiasettingsService) Requestinventoryverification(merchantId uint64, accountId uint64, country string) *LiasettingsRequestinventoryverificationCall { + c := &LiasettingsRequestinventoryverificationCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + c.country = country + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsRequestinventoryverificationCall) Fields(s ...googleapi.Field) *LiasettingsRequestinventoryverificationCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *LiasettingsRequestinventoryverificationCall) Context(ctx context.Context) *LiasettingsRequestinventoryverificationCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsRequestinventoryverificationCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsRequestinventoryverificationCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}/requestinventoryverification/{country}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + "country": c.country, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.requestinventoryverification" call. +// Exactly one of *LiasettingsRequestInventoryVerificationResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *LiasettingsRequestInventoryVerificationResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *LiasettingsRequestinventoryverificationCall) Do(opts ...googleapi.CallOption) (*LiasettingsRequestInventoryVerificationResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiasettingsRequestInventoryVerificationResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Requests inventory validation for the specified country.", + // "httpMethod": "POST", + // "id": "content.liasettings.requestinventoryverification", + // "parameterOrder": [ + // "merchantId", + // "accountId", + // "country" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account that manages the order. This cannot be a multi-client account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "country": { + // "description": "The country for which inventory validation is requested.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}/requestinventoryverification/{country}", + // "response": { + // "$ref": "LiasettingsRequestInventoryVerificationResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.setinventoryverificationcontact": + +type LiasettingsSetinventoryverificationcontactCall struct { + s *APIService + merchantId uint64 + accountId uint64 + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Setinventoryverificationcontact: Sets the inventory verification +// contract for the specified country. +func (r *LiasettingsService) Setinventoryverificationcontact(merchantId uint64, accountId uint64) *LiasettingsSetinventoryverificationcontactCall { + c := &LiasettingsSetinventoryverificationcontactCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + return c +} + +// ContactEmail sets the optional parameter "contactEmail": The email of +// the inventory verification contact. +func (c *LiasettingsSetinventoryverificationcontactCall) ContactEmail(contactEmail string) *LiasettingsSetinventoryverificationcontactCall { + c.urlParams_.Set("contactEmail", contactEmail) + return c +} + +// ContactName sets the optional parameter "contactName": The name of +// the inventory verification contact. +func (c *LiasettingsSetinventoryverificationcontactCall) ContactName(contactName string) *LiasettingsSetinventoryverificationcontactCall { + c.urlParams_.Set("contactName", contactName) + return c +} + +// Country sets the optional parameter "country": The country for which +// inventory verification is requested. 
+func (c *LiasettingsSetinventoryverificationcontactCall) Country(country string) *LiasettingsSetinventoryverificationcontactCall { + c.urlParams_.Set("country", country) + return c +} + +// Language sets the optional parameter "language": The language for +// which inventory verification is requested. +func (c *LiasettingsSetinventoryverificationcontactCall) Language(language string) *LiasettingsSetinventoryverificationcontactCall { + c.urlParams_.Set("language", language) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsSetinventoryverificationcontactCall) Fields(s ...googleapi.Field) *LiasettingsSetinventoryverificationcontactCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LiasettingsSetinventoryverificationcontactCall) Context(ctx context.Context) *LiasettingsSetinventoryverificationcontactCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsSetinventoryverificationcontactCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsSetinventoryverificationcontactCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}/setinventoryverificationcontact") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.setinventoryverificationcontact" call. +// Exactly one of *LiasettingsSetInventoryVerificationContactResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *LiasettingsSetInventoryVerificationContactResponse.ServerResponse.Hea +// der or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *LiasettingsSetinventoryverificationcontactCall) Do(opts ...googleapi.CallOption) (*LiasettingsSetInventoryVerificationContactResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiasettingsSetInventoryVerificationContactResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the inventory verification contract for the specified country.", + // "httpMethod": "POST", + // "id": "content.liasettings.setinventoryverificationcontact", + // "parameterOrder": [ + // "merchantId", + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account that manages the order. This cannot be a multi-client account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contactEmail": { + // "description": "The email of the inventory verification contact.", + // "location": "query", + // "type": "string" + // }, + // "contactName": { + // "description": "The name of the inventory verification contact.", + // "location": "query", + // "type": "string" + // }, + // "country": { + // "description": "The country for which inventory verification is requested.", + // "location": "query", + // "type": "string" + // }, + // "language": { + // "description": "The language for which inventory verification is requested.", + // "location": "query", + // "type": "string" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}/setinventoryverificationcontact", + // "response": { + // "$ref": "LiasettingsSetInventoryVerificationContactResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + +// method id "content.liasettings.update": + +type LiasettingsUpdateCall struct { + s *APIService + merchantId uint64 + accountId uint64 + liasettings *LiaSettings + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the LIA settings of the account. +func (r *LiasettingsService) Update(merchantId uint64, accountId uint64, liasettings *LiaSettings) *LiasettingsUpdateCall { + c := &LiasettingsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.merchantId = merchantId + c.accountId = accountId + c.liasettings = liasettings + return c +} + +// DryRun sets the optional parameter "dryRun": Flag to run the request +// in dry-run mode. +func (c *LiasettingsUpdateCall) DryRun(dryRun bool) *LiasettingsUpdateCall { + c.urlParams_.Set("dryRun", fmt.Sprint(dryRun)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LiasettingsUpdateCall) Fields(s ...googleapi.Field) *LiasettingsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *LiasettingsUpdateCall) Context(ctx context.Context) *LiasettingsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LiasettingsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LiasettingsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.liasettings) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{merchantId}/liasettings/{accountId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "merchantId": strconv.FormatUint(c.merchantId, 10), + "accountId": strconv.FormatUint(c.accountId, 10), + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "content.liasettings.update" call. +// Exactly one of *LiaSettings or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *LiaSettings.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LiasettingsUpdateCall) Do(opts ...googleapi.CallOption) (*LiaSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &LiaSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the LIA settings of the account.", + // "httpMethod": "PUT", + // "id": "content.liasettings.update", + // "parameterOrder": [ + // "merchantId", + // "accountId" + // ], + // "parameters": { + // "accountId": { + // "description": "The ID of the account for which to get or update LIA settings.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "dryRun": { + // "description": "Flag to run the request in dry-run mode.", + // "location": "query", + // "type": "boolean" + // }, + // "merchantId": { + // "description": "The ID of the managing account. 
If this parameter is not the same as accountId, then this account must be a multi-client account and accountId must be the ID of a sub-account of this account.", + // "format": "uint64", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{merchantId}/liasettings/{accountId}", + // "request": { + // "$ref": "LiaSettings" + // }, + // "response": { + // "$ref": "LiaSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/content" + // ] + // } + +} + // method id "content.orders.acknowledge": type OrdersAcknowledgeCall struct { @@ -16096,7 +18114,7 @@ func (c *PosDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "boolean" // }, // "merchantId": { - // "description": "The ID of the POS provider.", + // "description": "The ID of the POS or inventory data provider.", // "format": "uint64", // "location": "path", // "required": true, @@ -16252,7 +18270,7 @@ func (c *PosGetCall) Do(opts ...googleapi.CallOption) (*PosStore, error) { // ], // "parameters": { // "merchantId": { - // "description": "The ID of the POS provider.", + // "description": "The ID of the POS or inventory data provider.", // "format": "uint64", // "location": "path", // "required": true, @@ -16412,7 +18430,7 @@ func (c *PosInsertCall) Do(opts ...googleapi.CallOption) (*PosStore, error) { // "type": "boolean" // }, // "merchantId": { - // "description": "The ID of the POS provider.", + // "description": "The ID of the POS or inventory data provider.", // "format": "uint64", // "location": "path", // "required": true, @@ -16569,7 +18587,7 @@ func (c *PosInventoryCall) Do(opts ...googleapi.CallOption) (*PosInventoryRespon // "type": "boolean" // }, // "merchantId": { - // "description": "The ID of the POS provider.", + // "description": "The ID of the POS or inventory data provider.", // "format": "uint64", // "location": "path", // "required": true, @@ -16721,7 +18739,7 @@ func (c *PosListCall) Do(opts ...googleapi.CallOption) 
(*PosListResponse, error) // ], // "parameters": { // "merchantId": { - // "description": "The ID of the POS provider.", + // "description": "The ID of the POS or inventory data provider.", // "format": "uint64", // "location": "path", // "required": true, @@ -16875,7 +18893,7 @@ func (c *PosSaleCall) Do(opts ...googleapi.CallOption) (*PosSaleResponse, error) // "type": "boolean" // }, // "merchantId": { - // "description": "The ID of the POS provider.", + // "description": "The ID of the POS or inventory data provider.", // "format": "uint64", // "location": "path", // "required": true, diff --git a/vendor/google.golang.org/api/content/v2sandbox/content-api.json b/vendor/google.golang.org/api/content/v2sandbox/content-api.json index fc69fadcd..ca02e5b40 100644 --- a/vendor/google.golang.org/api/content/v2sandbox/content-api.json +++ b/vendor/google.golang.org/api/content/v2sandbox/content-api.json @@ -15,7 +15,7 @@ "description": "Manages product items, inventory, and Merchant Center accounts for Google Shopping.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/shopping-content", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/QMVTzP4sO9_JHyigCiKlZqLj3Hc\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/GzeGhrgiJKQlQpHAXI2VovJdlQo\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -792,7 +792,7 @@ } } }, - "revision": "20180205", + "revision": "20180316", "rootUrl": "https://www.googleapis.com/", "schemas": { "Error": { @@ -1011,7 +1011,7 @@ "id": "OrderCustomer", "properties": { "email": { - "description": "Email address of the customer.", + "description": "Email address that should be used for order related communications. 
In certain cases this might not be a real users email, but a proxy email.", "type": "string" }, "explicitMarketingPreference": { diff --git a/vendor/google.golang.org/api/content/v2sandbox/content-gen.go b/vendor/google.golang.org/api/content/v2sandbox/content-gen.go index c99cdda8c..db59b247a 100644 --- a/vendor/google.golang.org/api/content/v2sandbox/content-gen.go +++ b/vendor/google.golang.org/api/content/v2sandbox/content-gen.go @@ -343,7 +343,9 @@ func (s *OrderCancellation) MarshalJSON() ([]byte, error) { } type OrderCustomer struct { - // Email: Email address of the customer. + // Email: Email address that should be used for order related + // communications. In certain cases this might not be a real users + // email, but a proxy email. Email string `json:"email,omitempty"` // ExplicitMarketingPreference: If set, this indicates the user diff --git a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json index 678c0cbba..b4a39a72a 100644 --- a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json +++ b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-api.json @@ -1554,7 +1554,7 @@ } } }, - "revision": "20180220", + "revision": "20180314", "rootUrl": "https://dataflow.googleapis.com/", "schemas": { "ApproximateProgress": { @@ -1826,7 +1826,8 @@ "OR", "AND", "SET", - "DISTRIBUTION" + "DISTRIBUTION", + "LATEST_VALUE" ], "enumDescriptions": [ "Counter aggregation kind was not set.", @@ -1837,7 +1838,8 @@ "Aggregated value represents the logical 'or' of all contributed values.", "Aggregated value represents the logical 'and' of all contributed values.", "Aggregated value is a set of unique contributed values.", - "Aggregated value captures statistics about a distribution." + "Aggregated value captures statistics about a distribution.", + "Aggregated value tracks the latest value of a variable." 
], "type": "string" }, @@ -1986,6 +1988,10 @@ "$ref": "SplitInt64", "description": "Integer value for Sum, Max, Min." }, + "integerGauge": { + "$ref": "IntegerGauge", + "description": "Gauge data" + }, "integerList": { "$ref": "IntegerList", "description": "List of integers, for Set." @@ -2587,6 +2593,22 @@ }, "type": "object" }, + "IntegerGauge": { + "description": "A metric value representing temporal values of a variable.", + "id": "IntegerGauge", + "properties": { + "timestamp": { + "description": "The time at which this value was measured. Measured as msecs from epoch.", + "format": "google-datetime", + "type": "string" + }, + "value": { + "$ref": "SplitInt64", + "description": "The value of the variable represented by this gauge." + } + }, + "type": "object" + }, "IntegerList": { "description": "A metric value representing a list of integers.", "id": "IntegerList", @@ -3197,7 +3219,8 @@ "OR", "AND", "SET", - "DISTRIBUTION" + "DISTRIBUTION", + "LATEST_VALUE" ], "enumDescriptions": [ "Counter aggregation kind was not set.", @@ -3208,7 +3231,8 @@ "Aggregated value represents the logical 'or' of all contributed values.", "Aggregated value represents the logical 'and' of all contributed values.", "Aggregated value is a set of unique contributed values.", - "Aggregated value captures statistics about a distribution." + "Aggregated value captures statistics about a distribution.", + "Aggregated value tracks the latest value of a variable." ], "type": "string" }, diff --git a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go index 7bfb4c101..2990b2e60 100644 --- a/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go +++ b/vendor/google.golang.org/api/dataflow/v1b3/dataflow-gen.go @@ -767,6 +767,8 @@ type CounterMetadata struct { // "SET" - Aggregated value is a set of unique contributed values. // "DISTRIBUTION" - Aggregated value captures statistics about a // distribution. 
+ // "LATEST_VALUE" - Aggregated value tracks the latest value of a + // variable. Kind string `json:"kind,omitempty"` // OtherUnits: A string referring to the unit type. @@ -955,6 +957,9 @@ type CounterUpdate struct { // Integer: Integer value for Sum, Max, Min. Integer *SplitInt64 `json:"integer,omitempty"` + // IntegerGauge: Gauge data + IntegerGauge *IntegerGauge `json:"integerGauge,omitempty"` + // IntegerList: List of integers, for Set. IntegerList *IntegerList `json:"integerList,omitempty"` @@ -2068,6 +2073,39 @@ func (s *InstructionOutput) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// IntegerGauge: A metric value representing temporal values of a +// variable. +type IntegerGauge struct { + // Timestamp: The time at which this value was measured. Measured as + // msecs from epoch. + Timestamp string `json:"timestamp,omitempty"` + + // Value: The value of the variable represented by this gauge. + Value *SplitInt64 `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Timestamp") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Timestamp") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *IntegerGauge) MarshalJSON() ([]byte, error) { + type NoMethod IntegerGauge + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // IntegerList: A metric value representing a list of integers. type IntegerList struct { // Elements: Elements of the list. @@ -3262,6 +3300,8 @@ type NameAndKind struct { // "SET" - Aggregated value is a set of unique contributed values. // "DISTRIBUTION" - Aggregated value captures statistics about a // distribution. + // "LATEST_VALUE" - Aggregated value tracks the latest value of a + // variable. Kind string `json:"kind,omitempty"` // Name: Name of the counter. diff --git a/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json index 216a1e207..f44e78e0a 100644 --- a/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1/dataproc-api.json @@ -9,7 +9,7 @@ } }, "basePath": "", - "baseUrl": "https://content-dataproc.googleapis.com/", + "baseUrl": "https://dataproc.googleapis.com/", "batchPath": "batch", "canonicalName": "Dataproc", "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", @@ -783,8 +783,8 @@ } } }, - "revision": "20180306", - "rootUrl": "https://content-dataproc.googleapis.com/", + "revision": "20180312", + "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { "description": "Specifies the type and number of accelerator cards attached to the instances of an instance. 
See GPUs on Compute Engine.", diff --git a/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go index 796fb98d9..da0840d99 100644 --- a/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1/dataproc-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "dataproc:v1" const apiName = "dataproc" const apiVersion = "v1" -const basePath = "https://content-dataproc.googleapis.com/" +const basePath = "https://dataproc.googleapis.com/" // OAuth2 scopes used by this API. const ( diff --git a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json index dd72b5b8d..017df3e9f 100644 --- a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json +++ b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-api.json @@ -9,7 +9,7 @@ } }, "basePath": "", - "baseUrl": "https://content-dataproc.googleapis.com/", + "baseUrl": "https://dataproc.googleapis.com/", "batchPath": "batch", "canonicalName": "Dataproc", "description": "Manages Hadoop-based clusters and jobs on Google Cloud Platform.", @@ -1630,8 +1630,8 @@ } } }, - "revision": "20180306", - "rootUrl": "https://content-dataproc.googleapis.com/", + "revision": "20180320", + "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { "description": "Specifies the type and number of accelerator cards attached to the instances of an instance group (see GPUs on Compute Engine).", @@ -2508,7 +2508,7 @@ "id": "ManagedCluster", "properties": { "clusterName": { - "description": "Required. The cluster name. Cluster names within a project must be unique. Names from deleted clusters can be reused.", + "description": "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix.The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). 
Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", "type": "string" }, "config": { @@ -2519,7 +2519,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 64 labels can be associated with a given cluster.", + "description": "Optional. The labels to associate with this cluster.Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given cluster.", "type": "object" } }, @@ -2607,7 +2607,7 @@ "additionalProperties": { "type": "string" }, - "description": "Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 64 labels can be associated with a given job.", + "description": "Optional. The labels to associate with this job.Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}{0,62}Label values must be between 1 and 63 characters long, and must conform to the following regular expression: \\p{Ll}\\p{Lo}\\p{N}_-{0,63}No more than 32 labels can be associated with a given job.", "type": "object" }, "pigJob": { @@ -2638,7 +2638,7 @@ "description": "Job is a SparkSql job." 
}, "stepId": { - "description": "Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job workflow-step-id label, and in prerequisite_step_ids field from other steps.", + "description": "Required. The step id. The id must be unique among all jobs within the template.The step id is used as prefix for job id, as job goog-dataproc-workflow-step-id label, and in prerequisiteStepIds field from other steps.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", "type": "string" } }, @@ -3010,6 +3010,13 @@ "$ref": "WorkflowGraph", "description": "Output only. The workflow graph." }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "Map from parameter names to values that were used for those parameters.", + "type": "object" + }, "state": { "description": "Output only. The workflow state.", "enum": [ @@ -3094,7 +3101,7 @@ "type": "string" }, "id": { - "description": "Required. The template id.", + "description": "Required. The template id.The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", "type": "string" }, "jobs": { diff --git a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go index 41acaa9f5..070faf3a5 100644 --- a/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go +++ b/vendor/google.golang.org/api/dataproc/v1beta2/dataproc-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "dataproc:v1beta2" const apiName = "dataproc" const apiVersion = "v1beta2" -const basePath = "https://content-dataproc.googleapis.com/" +const basePath = "https://dataproc.googleapis.com/" // OAuth2 scopes used by this API. 
const ( @@ -1590,8 +1590,11 @@ func (s *LoggingConfig) MarshalJSON() ([]byte, error) { // ManagedCluster: Cluster that is managed by the workflow. type ManagedCluster struct { - // ClusterName: Required. The cluster name. Cluster names within a - // project must be unique. Names from deleted clusters can be reused. + // ClusterName: Required. The cluster name prefix. A unique cluster name + // will be formed by appending a random suffix.The name must contain + // only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must + // begin with a letter. Cannot begin or end with hyphen. Must consist of + // between 2 and 35 characters. ClusterName string `json:"clusterName,omitempty"` // Config: Required. The cluster configuration. @@ -1602,7 +1605,7 @@ type ManagedCluster struct { // the following PCRE regular expression: \p{Ll}\p{Lo}{0,62}Label values // must be between 1 and 63 characters long, and must conform to the // following PCRE regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more - // than 64 labels can be associated with a given cluster. + // than 32 labels can be associated with a given cluster. Labels map[string]string `json:"labels,omitempty"` // ForceSendFields is a list of field names (e.g. "ClusterName") to @@ -1774,7 +1777,7 @@ type OrderedJob struct { // must be between 1 and 63 characters long, and must conform to the // following regular expression: \p{Ll}\p{Lo}{0,62}Label values must be // between 1 and 63 characters long, and must conform to the following - // regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels + // regular expression: \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 32 labels // can be associated with a given job. Labels map[string]string `json:"labels,omitempty"` @@ -1800,8 +1803,11 @@ type OrderedJob struct { // StepId: Required. The step id. 
The id must be unique among all jobs // within the template.The step id is used as prefix for job id, as job - // workflow-step-id label, and in prerequisite_step_ids field from other - // steps. + // goog-dataproc-workflow-step-id label, and in prerequisiteStepIds + // field from other steps.The id must contain only letters (a-z, A-Z), + // numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end + // with underscore or hyphen. Must consist of between 3 and 50 + // characters. StepId string `json:"stepId,omitempty"` // ForceSendFields is a list of field names (e.g. "HadoopJob") to @@ -2470,6 +2476,10 @@ type WorkflowMetadata struct { // Graph: Output only. The workflow graph. Graph *WorkflowGraph `json:"graph,omitempty"` + // Parameters: Map from parameter names to values that were used for + // those parameters. + Parameters map[string]string `json:"parameters,omitempty"` + // State: Output only. The workflow state. // // Possible values: @@ -2564,7 +2574,10 @@ type WorkflowTemplate struct { // CreateTime: Output only. The time template was created. CreateTime string `json:"createTime,omitempty"` - // Id: Required. The template id. + // Id: Required. The template id.The id must contain only letters (a-z, + // A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin + // or end with underscore or hyphen. Must consist of between 3 and 50 + // characters. Id string `json:"id,omitempty"` // Jobs: Required. The Directed Acyclic Graph of Jobs to submit. 
diff --git a/vendor/google.golang.org/api/datastore/v1/datastore-api.json b/vendor/google.golang.org/api/datastore/v1/datastore-api.json index 25685c8a1..ec291f75d 100644 --- a/vendor/google.golang.org/api/datastore/v1/datastore-api.json +++ b/vendor/google.golang.org/api/datastore/v1/datastore-api.json @@ -202,6 +202,62 @@ "https://www.googleapis.com/auth/datastore" ] }, + "export": { + "description": "Exports a copy of all or a subset of entities from Google Cloud Datastore\nto another storage system, such as Google Cloud Storage. Recent updates to\nentities may not be reflected in the export. The export occurs in the\nbackground and its progress can be monitored and managed via the\nOperation resource that is created. The output of an export may only be\nused once the associated operation is done. If an export operation is\ncancelled before completion it may leave partial data behind in Google\nCloud Storage.", + "flatPath": "v1/projects/{projectId}:export", + "httpMethod": "POST", + "id": "datastore.projects.export", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "projectId": { + "description": "Project ID against which to make the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}:export", + "request": { + "$ref": "GoogleDatastoreAdminV1ExportEntitiesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore" + ] + }, + "import": { + "description": "Imports entities into Google Cloud Datastore. Existing entities with the\nsame key are overwritten. The import occurs in the background and its\nprogress can be monitored and managed via the Operation resource that is\ncreated. 
If an ImportEntities operation is cancelled, it is possible\nthat a subset of the data has already been imported to Cloud Datastore.", + "flatPath": "v1/projects/{projectId}:import", + "httpMethod": "POST", + "id": "datastore.projects.import", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "projectId": { + "description": "Project ID against which to make the request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "v1/projects/{projectId}:import", + "request": { + "$ref": "GoogleDatastoreAdminV1ImportEntitiesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/datastore" + ] + }, "lookup": { "description": "Looks up entities by key.", "flatPath": "v1/projects/{projectId}:lookup", @@ -443,7 +499,7 @@ } } }, - "revision": "20180117", + "revision": "20180321", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "AllocateIdsRequest": { @@ -649,6 +705,215 @@ }, "type": "object" }, + "GoogleDatastoreAdminV1CommonMetadata": { + "description": "Metadata common to all Datastore Admin operations.", + "id": "GoogleDatastoreAdminV1CommonMetadata", + "properties": { + "endTime": { + "description": "The time the operation ended, either successfully or otherwise.", + "format": "google-datetime", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The client-assigned labels which were provided when the operation was\ncreated. May also include additional labels.", + "type": "object" + }, + "operationType": { + "description": "The type of the operation. Can be used as a filter in\nListOperationsRequest.", + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "EXPORT_ENTITIES", + "IMPORT_ENTITIES" + ], + "enumDescriptions": [ + "Unspecified.", + "ExportEntities.", + "ImportEntities." 
+ ], + "type": "string" + }, + "startTime": { + "description": "The time that work began on the operation.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "The current state of the Operation.", + "enum": [ + "STATE_UNSPECIFIED", + "INITIALIZING", + "PROCESSING", + "CANCELLING", + "FINALIZING", + "SUCCESSFUL", + "FAILED", + "CANCELLED" + ], + "enumDescriptions": [ + "Unspecified.", + "Request is being prepared for processing.", + "Request is actively being processed.", + "Request is in the process of being cancelled after user called\ngoogle.longrunning.Operations.CancelOperation on the operation.", + "Request has been processed and is in its finalization stage.", + "Request has completed successfully.", + "Request has finished being processed, but encountered an error.", + "Request has finished being cancelled after user called\ngoogle.longrunning.Operations.CancelOperation." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1EntityFilter": { + "description": "Identifies a subset of entities in a project. This is specified as\ncombinations of kinds and namespaces (either or both of which may be all, as\ndescribed in the following examples).\nExample usage:\n\nEntire project:\n kinds=[], namespace_ids=[]\n\nKinds Foo and Bar in all namespaces:\n kinds=['Foo', 'Bar'], namespace_ids=[]\n\nKinds Foo and Bar only in the default namespace:\n kinds=['Foo', 'Bar'], namespace_ids=['']\n\nKinds Foo and Bar in both the default and Baz namespaces:\n kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz']\n\nThe entire Baz namespace:\n kinds=[], namespace_ids=['Baz']", + "id": "GoogleDatastoreAdminV1EntityFilter", + "properties": { + "kinds": { + "description": "If empty, then this represents all kinds.", + "items": { + "type": "string" + }, + "type": "array" + }, + "namespaceIds": { + "description": "An empty list represents all namespaces. 
This is the preferred\nusage for projects that don't use namespaces.\n\nAn empty string element represents the default namespace. This should be\nused if the project has data in non-default namespaces, but doesn't want to\ninclude them.\nEach namespace in this list must be unique.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesMetadata": { + "description": "Metadata for ExportEntities operations.", + "id": "GoogleDatastoreAdminV1ExportEntitiesMetadata", + "properties": { + "common": { + "$ref": "GoogleDatastoreAdminV1CommonMetadata", + "description": "Metadata common to all Datastore Admin operations." + }, + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of which entities are being exported." + }, + "outputUrlPrefix": { + "description": "Location for the export metadata and data files. This will be the same\nvalue as the\ngoogle.datastore.admin.v1.ExportEntitiesRequest.output_url_prefix\nfield. The final output location is provided in\ngoogle.datastore.admin.v1.ExportEntitiesResponse.output_url.", + "type": "string" + }, + "progressBytes": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of bytes processed." + }, + "progressEntities": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of entities processed." + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesRequest": { + "description": "The request for\ngoogle.datastore.admin.v1.DatastoreAdmin.ExportEntities.", + "id": "GoogleDatastoreAdminV1ExportEntitiesRequest", + "properties": { + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of what data from the project is included in the export." 
+ }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Client-assigned labels.", + "type": "object" + }, + "outputUrlPrefix": { + "description": "Location for the export metadata and data files.\n\nThe full resource URL of the external storage location. Currently, only\nGoogle Cloud Storage is supported. So output_url_prefix should be of the\nform: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the\nname of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional Cloud\nStorage namespace path (this is not a Cloud Datastore namespace). For more\ninformation about Cloud Storage namespace paths, see\n[Object name\nconsiderations](https://cloud.google.com/storage/docs/naming#object-considerations).\n\nThe resulting files will be nested deeper than the specified URL prefix.\nThe final output URL will be provided in the\ngoogle.datastore.admin.v1.ExportEntitiesResponse.output_url field. That\nvalue should be used for subsequent ImportEntities operations.\n\nBy nesting the data files deeper, the same Cloud Storage bucket can be used\nin multiple ExportEntities operations without conflict.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesResponse": { + "description": "The response for\ngoogle.datastore.admin.v1.DatastoreAdmin.ExportEntities.", + "id": "GoogleDatastoreAdminV1ExportEntitiesResponse", + "properties": { + "outputUrl": { + "description": "Location of the output metadata file. This can be used to begin an import\ninto Cloud Datastore (this project or another project). 
See\ngoogle.datastore.admin.v1.ImportEntitiesRequest.input_url.\nOnly present if the operation completed successfully.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ImportEntitiesMetadata": { + "description": "Metadata for ImportEntities operations.", + "id": "GoogleDatastoreAdminV1ImportEntitiesMetadata", + "properties": { + "common": { + "$ref": "GoogleDatastoreAdminV1CommonMetadata", + "description": "Metadata common to all Datastore Admin operations." + }, + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of which entities are being imported." + }, + "inputUrl": { + "description": "The location of the import metadata file. This will be the same value as\nthe google.datastore.admin.v1.ExportEntitiesResponse.output_url field.", + "type": "string" + }, + "progressBytes": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of bytes processed." + }, + "progressEntities": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of entities processed." + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ImportEntitiesRequest": { + "description": "The request for\ngoogle.datastore.admin.v1.DatastoreAdmin.ImportEntities.", + "id": "GoogleDatastoreAdminV1ImportEntitiesRequest", + "properties": { + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Optionally specify which kinds/namespaces are to be imported. If provided,\nthe list must be a subset of the EntityFilter used in creating the export,\notherwise a FAILED_PRECONDITION error will be returned. If no filter is\nspecified then all entities from the export are imported." + }, + "inputUrl": { + "description": "The full resource URL of the external storage location. Currently, only\nGoogle Cloud Storage is supported. 
So input_url should be of the form:\n`gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE`, where\n`BUCKET_NAME` is the name of the Cloud Storage bucket, `NAMESPACE_PATH` is\nan optional Cloud Storage namespace path (this is not a Cloud Datastore\nnamespace), and `OVERALL_EXPORT_METADATA_FILE` is the metadata file written\nby the ExportEntities operation. For more information about Cloud Storage\nnamespace paths, see\n[Object name\nconsiderations](https://cloud.google.com/storage/docs/naming#object-considerations).\n\nFor more information, see\ngoogle.datastore.admin.v1.ExportEntitiesResponse.output_url.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Client-assigned labels.", + "type": "object" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1Progress": { + "description": "Measures the progress of a particular metric.", + "id": "GoogleDatastoreAdminV1Progress", + "properties": { + "workCompleted": { + "description": "The amount of work that has been completed. Note that this may be greater\nthan work_estimated.", + "format": "int64", + "type": "string" + }, + "workEstimated": { + "description": "An estimate of how much work needs to be performed. 
May be zero if the\nwork estimate is unavailable.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "GoogleDatastoreAdminV1beta1CommonMetadata": { "description": "Metadata common to all Datastore Admin operations.", "id": "GoogleDatastoreAdminV1beta1CommonMetadata", diff --git a/vendor/google.golang.org/api/datastore/v1/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1/datastore-gen.go index 2119e341d..29a48dd5d 100644 --- a/vendor/google.golang.org/api/datastore/v1/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1/datastore-gen.go @@ -541,6 +541,427 @@ func (s *Filter) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleDatastoreAdminV1CommonMetadata: Metadata common to all +// Datastore Admin operations. +type GoogleDatastoreAdminV1CommonMetadata struct { + // EndTime: The time the operation ended, either successfully or + // otherwise. + EndTime string `json:"endTime,omitempty"` + + // Labels: The client-assigned labels which were provided when the + // operation was + // created. May also include additional labels. + Labels map[string]string `json:"labels,omitempty"` + + // OperationType: The type of the operation. Can be used as a filter + // in + // ListOperationsRequest. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Unspecified. + // "EXPORT_ENTITIES" - ExportEntities. + // "IMPORT_ENTITIES" - ImportEntities. + OperationType string `json:"operationType,omitempty"` + + // StartTime: The time that work began on the operation. + StartTime string `json:"startTime,omitempty"` + + // State: The current state of the Operation. + // + // Possible values: + // "STATE_UNSPECIFIED" - Unspecified. + // "INITIALIZING" - Request is being prepared for processing. + // "PROCESSING" - Request is actively being processed. 
+ // "CANCELLING" - Request is in the process of being cancelled after + // user called + // google.longrunning.Operations.CancelOperation on the operation. + // "FINALIZING" - Request has been processed and is in its + // finalization stage. + // "SUCCESSFUL" - Request has completed successfully. + // "FAILED" - Request has finished being processed, but encountered an + // error. + // "CANCELLED" - Request has finished being cancelled after user + // called + // google.longrunning.Operations.CancelOperation. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1CommonMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1CommonMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1EntityFilter: Identifies a subset of entities +// in a project. This is specified as +// combinations of kinds and namespaces (either or both of which may be +// all, as +// described in the following examples). 
+// Example usage: +// +// Entire project: +// kinds=[], namespace_ids=[] +// +// Kinds Foo and Bar in all namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=[] +// +// Kinds Foo and Bar only in the default namespace: +// kinds=['Foo', 'Bar'], namespace_ids=[''] +// +// Kinds Foo and Bar in both the default and Baz namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz'] +// +// The entire Baz namespace: +// kinds=[], namespace_ids=['Baz'] +type GoogleDatastoreAdminV1EntityFilter struct { + // Kinds: If empty, then this represents all kinds. + Kinds []string `json:"kinds,omitempty"` + + // NamespaceIds: An empty list represents all namespaces. This is the + // preferred + // usage for projects that don't use namespaces. + // + // An empty string element represents the default namespace. This should + // be + // used if the project has data in non-default namespaces, but doesn't + // want to + // include them. + // Each namespace in this list must be unique. + NamespaceIds []string `json:"namespaceIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kinds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kinds") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1EntityFilter) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1EntityFilter + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesMetadata: Metadata for +// ExportEntities operations. +type GoogleDatastoreAdminV1ExportEntitiesMetadata struct { + // Common: Metadata common to all Datastore Admin operations. + Common *GoogleDatastoreAdminV1CommonMetadata `json:"common,omitempty"` + + // EntityFilter: Description of which entities are being exported. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // OutputUrlPrefix: Location for the export metadata and data files. + // This will be the same + // value as + // the + // google.datastore.admin.v1.ExportEntitiesRequest.output_url_prefix + // + // field. The final output location is provided + // in + // google.datastore.admin.v1.ExportEntitiesResponse.output_url. + OutputUrlPrefix string `json:"outputUrlPrefix,omitempty"` + + // ProgressBytes: An estimate of the number of bytes processed. + ProgressBytes *GoogleDatastoreAdminV1Progress `json:"progressBytes,omitempty"` + + // ProgressEntities: An estimate of the number of entities processed. + ProgressEntities *GoogleDatastoreAdminV1Progress `json:"progressEntities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesRequest: The request +// for +// google.datastore.admin.v1.DatastoreAdmin.ExportEntities. +type GoogleDatastoreAdminV1ExportEntitiesRequest struct { + // EntityFilter: Description of what data from the project is included + // in the export. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // Labels: Client-assigned labels. + Labels map[string]string `json:"labels,omitempty"` + + // OutputUrlPrefix: Location for the export metadata and data + // files. + // + // The full resource URL of the external storage location. Currently, + // only + // Google Cloud Storage is supported. So output_url_prefix should be of + // the + // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is + // the + // name of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional + // Cloud + // Storage namespace path (this is not a Cloud Datastore namespace). For + // more + // information about Cloud Storage namespace paths, see + // [Object + // name + // considerations](https://cloud.google.com/storage/docs/naming#obje + // ct-considerations). + // + // The resulting files will be nested deeper than the specified URL + // prefix. + // The final output URL will be provided in + // the + // google.datastore.admin.v1.ExportEntitiesResponse.output_url field. 
+ // That + // value should be used for subsequent ImportEntities operations. + // + // By nesting the data files deeper, the same Cloud Storage bucket can + // be used + // in multiple ExportEntities operations without conflict. + OutputUrlPrefix string `json:"outputUrlPrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityFilter") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityFilter") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesResponse: The response +// for +// google.datastore.admin.v1.DatastoreAdmin.ExportEntities. +type GoogleDatastoreAdminV1ExportEntitiesResponse struct { + // OutputUrl: Location of the output metadata file. This can be used to + // begin an import + // into Cloud Datastore (this project or another project). + // See + // google.datastore.admin.v1.ImportEntitiesRequest.input_url. + // Only present if the operation completed successfully. 
+ OutputUrl string `json:"outputUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OutputUrl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OutputUrl") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ImportEntitiesMetadata: Metadata for +// ImportEntities operations. +type GoogleDatastoreAdminV1ImportEntitiesMetadata struct { + // Common: Metadata common to all Datastore Admin operations. + Common *GoogleDatastoreAdminV1CommonMetadata `json:"common,omitempty"` + + // EntityFilter: Description of which entities are being imported. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // InputUrl: The location of the import metadata file. This will be the + // same value as + // the google.datastore.admin.v1.ExportEntitiesResponse.output_url + // field. + InputUrl string `json:"inputUrl,omitempty"` + + // ProgressBytes: An estimate of the number of bytes processed. 
+ ProgressBytes *GoogleDatastoreAdminV1Progress `json:"progressBytes,omitempty"` + + // ProgressEntities: An estimate of the number of entities processed. + ProgressEntities *GoogleDatastoreAdminV1Progress `json:"progressEntities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ImportEntitiesMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ImportEntitiesMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ImportEntitiesRequest: The request +// for +// google.datastore.admin.v1.DatastoreAdmin.ImportEntities. +type GoogleDatastoreAdminV1ImportEntitiesRequest struct { + // EntityFilter: Optionally specify which kinds/namespaces are to be + // imported. If provided, + // the list must be a subset of the EntityFilter used in creating the + // export, + // otherwise a FAILED_PRECONDITION error will be returned. If no filter + // is + // specified then all entities from the export are imported. 
+ EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // InputUrl: The full resource URL of the external storage location. + // Currently, only + // Google Cloud Storage is supported. So input_url should be of the + // form: + // `gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE` + // , where + // `BUCKET_NAME` is the name of the Cloud Storage bucket, + // `NAMESPACE_PATH` is + // an optional Cloud Storage namespace path (this is not a Cloud + // Datastore + // namespace), and `OVERALL_EXPORT_METADATA_FILE` is the metadata file + // written + // by the ExportEntities operation. For more information about Cloud + // Storage + // namespace paths, see + // [Object + // name + // considerations](https://cloud.google.com/storage/docs/naming#obje + // ct-considerations). + // + // For more information, + // see + // google.datastore.admin.v1.ExportEntitiesResponse.output_url. + InputUrl string `json:"inputUrl,omitempty"` + + // Labels: Client-assigned labels. + Labels map[string]string `json:"labels,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityFilter") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityFilter") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ImportEntitiesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ImportEntitiesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1Progress: Measures the progress of a particular +// metric. +type GoogleDatastoreAdminV1Progress struct { + // WorkCompleted: The amount of work that has been completed. Note that + // this may be greater + // than work_estimated. + WorkCompleted int64 `json:"workCompleted,omitempty,string"` + + // WorkEstimated: An estimate of how much work needs to be performed. + // May be zero if the + // work estimate is unavailable. + WorkEstimated int64 `json:"workEstimated,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "WorkCompleted") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WorkCompleted") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1Progress) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1Progress + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleDatastoreAdminV1beta1CommonMetadata: Metadata common to all // Datastore Admin operations. type GoogleDatastoreAdminV1beta1CommonMetadata struct { @@ -2663,6 +3084,299 @@ func (c *ProjectsCommitCall) Do(opts ...googleapi.CallOption) (*CommitResponse, } +// method id "datastore.projects.export": + +type ProjectsExportCall struct { + s *Service + projectId string + googledatastoreadminv1exportentitiesrequest *GoogleDatastoreAdminV1ExportEntitiesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Export: Exports a copy of all or a subset of entities from Google +// Cloud Datastore +// to another storage system, such as Google Cloud Storage. Recent +// updates to +// entities may not be reflected in the export. The export occurs in +// the +// background and its progress can be monitored and managed via +// the +// Operation resource that is created. The output of an export may only +// be +// used once the associated operation is done. If an export operation +// is +// cancelled before completion it may leave partial data behind in +// Google +// Cloud Storage. +func (r *ProjectsService) Export(projectId string, googledatastoreadminv1exportentitiesrequest *GoogleDatastoreAdminV1ExportEntitiesRequest) *ProjectsExportCall { + c := &ProjectsExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.googledatastoreadminv1exportentitiesrequest = googledatastoreadminv1exportentitiesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsExportCall) Fields(s ...googleapi.Field) *ProjectsExportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsExportCall) Context(ctx context.Context) *ProjectsExportCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsExportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsExportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googledatastoreadminv1exportentitiesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}:export") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "datastore.projects.export" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsExportCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Exports a copy of all or a subset of entities from Google Cloud Datastore\nto another storage system, such as Google Cloud Storage. Recent updates to\nentities may not be reflected in the export. The export occurs in the\nbackground and its progress can be monitored and managed via the\nOperation resource that is created. The output of an export may only be\nused once the associated operation is done. 
If an export operation is\ncancelled before completion it may leave partial data behind in Google\nCloud Storage.", + // "flatPath": "v1/projects/{projectId}:export", + // "httpMethod": "POST", + // "id": "datastore.projects.export", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID against which to make the request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}:export", + // "request": { + // "$ref": "GoogleDatastoreAdminV1ExportEntitiesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/datastore" + // ] + // } + +} + +// method id "datastore.projects.import": + +type ProjectsImportCall struct { + s *Service + projectId string + googledatastoreadminv1importentitiesrequest *GoogleDatastoreAdminV1ImportEntitiesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Import: Imports entities into Google Cloud Datastore. Existing +// entities with the +// same key are overwritten. The import occurs in the background and +// its +// progress can be monitored and managed via the Operation resource that +// is +// created. If an ImportEntities operation is cancelled, it is +// possible +// that a subset of the data has already been imported to Cloud +// Datastore. +func (r *ProjectsService) Import(projectId string, googledatastoreadminv1importentitiesrequest *GoogleDatastoreAdminV1ImportEntitiesRequest) *ProjectsImportCall { + c := &ProjectsImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.googledatastoreadminv1importentitiesrequest = googledatastoreadminv1importentitiesrequest + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsImportCall) Fields(s ...googleapi.Field) *ProjectsImportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsImportCall) Context(ctx context.Context) *ProjectsImportCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsImportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsImportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googledatastoreadminv1importentitiesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/projects/{projectId}:import") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "datastore.projects.import" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsImportCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Imports entities into Google Cloud Datastore. Existing entities with the\nsame key are overwritten. The import occurs in the background and its\nprogress can be monitored and managed via the Operation resource that is\ncreated. 
If an ImportEntities operation is cancelled, it is possible\nthat a subset of the data has already been imported to Cloud Datastore.", + // "flatPath": "v1/projects/{projectId}:import", + // "httpMethod": "POST", + // "id": "datastore.projects.import", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID against which to make the request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/projects/{projectId}:import", + // "request": { + // "$ref": "GoogleDatastoreAdminV1ImportEntitiesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/datastore" + // ] + // } + +} + // method id "datastore.projects.lookup": type ProjectsLookupCall struct { diff --git a/vendor/google.golang.org/api/datastore/v1beta1/datastore-api.json b/vendor/google.golang.org/api/datastore/v1beta1/datastore-api.json index a96ae18e9..f0f8ec0a1 100644 --- a/vendor/google.golang.org/api/datastore/v1beta1/datastore-api.json +++ b/vendor/google.golang.org/api/datastore/v1beta1/datastore-api.json @@ -177,9 +177,174 @@ } } }, - "revision": "20180117", + "revision": "20180321", "rootUrl": "https://datastore.googleapis.com/", "schemas": { + "GoogleDatastoreAdminV1CommonMetadata": { + "description": "Metadata common to all Datastore Admin operations.", + "id": "GoogleDatastoreAdminV1CommonMetadata", + "properties": { + "endTime": { + "description": "The time the operation ended, either successfully or otherwise.", + "format": "google-datetime", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The client-assigned labels which were provided when the operation was\ncreated. 
May also include additional labels.", + "type": "object" + }, + "operationType": { + "description": "The type of the operation. Can be used as a filter in\nListOperationsRequest.", + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "EXPORT_ENTITIES", + "IMPORT_ENTITIES" + ], + "enumDescriptions": [ + "Unspecified.", + "ExportEntities.", + "ImportEntities." + ], + "type": "string" + }, + "startTime": { + "description": "The time that work began on the operation.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "The current state of the Operation.", + "enum": [ + "STATE_UNSPECIFIED", + "INITIALIZING", + "PROCESSING", + "CANCELLING", + "FINALIZING", + "SUCCESSFUL", + "FAILED", + "CANCELLED" + ], + "enumDescriptions": [ + "Unspecified.", + "Request is being prepared for processing.", + "Request is actively being processed.", + "Request is in the process of being cancelled after user called\ngoogle.longrunning.Operations.CancelOperation on the operation.", + "Request has been processed and is in its finalization stage.", + "Request has completed successfully.", + "Request has finished being processed, but encountered an error.", + "Request has finished being cancelled after user called\ngoogle.longrunning.Operations.CancelOperation." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1EntityFilter": { + "description": "Identifies a subset of entities in a project. 
This is specified as\ncombinations of kinds and namespaces (either or both of which may be all, as\ndescribed in the following examples).\nExample usage:\n\nEntire project:\n kinds=[], namespace_ids=[]\n\nKinds Foo and Bar in all namespaces:\n kinds=['Foo', 'Bar'], namespace_ids=[]\n\nKinds Foo and Bar only in the default namespace:\n kinds=['Foo', 'Bar'], namespace_ids=['']\n\nKinds Foo and Bar in both the default and Baz namespaces:\n kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz']\n\nThe entire Baz namespace:\n kinds=[], namespace_ids=['Baz']", + "id": "GoogleDatastoreAdminV1EntityFilter", + "properties": { + "kinds": { + "description": "If empty, then this represents all kinds.", + "items": { + "type": "string" + }, + "type": "array" + }, + "namespaceIds": { + "description": "An empty list represents all namespaces. This is the preferred\nusage for projects that don't use namespaces.\n\nAn empty string element represents the default namespace. This should be\nused if the project has data in non-default namespaces, but doesn't want to\ninclude them.\nEach namespace in this list must be unique.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesMetadata": { + "description": "Metadata for ExportEntities operations.", + "id": "GoogleDatastoreAdminV1ExportEntitiesMetadata", + "properties": { + "common": { + "$ref": "GoogleDatastoreAdminV1CommonMetadata", + "description": "Metadata common to all Datastore Admin operations." + }, + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of which entities are being exported." + }, + "outputUrlPrefix": { + "description": "Location for the export metadata and data files. This will be the same\nvalue as the\ngoogle.datastore.admin.v1.ExportEntitiesRequest.output_url_prefix\nfield. 
The final output location is provided in\ngoogle.datastore.admin.v1.ExportEntitiesResponse.output_url.", + "type": "string" + }, + "progressBytes": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of bytes processed." + }, + "progressEntities": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of entities processed." + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesResponse": { + "description": "The response for\ngoogle.datastore.admin.v1.DatastoreAdmin.ExportEntities.", + "id": "GoogleDatastoreAdminV1ExportEntitiesResponse", + "properties": { + "outputUrl": { + "description": "Location of the output metadata file. This can be used to begin an import\ninto Cloud Datastore (this project or another project). See\ngoogle.datastore.admin.v1.ImportEntitiesRequest.input_url.\nOnly present if the operation completed successfully.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ImportEntitiesMetadata": { + "description": "Metadata for ImportEntities operations.", + "id": "GoogleDatastoreAdminV1ImportEntitiesMetadata", + "properties": { + "common": { + "$ref": "GoogleDatastoreAdminV1CommonMetadata", + "description": "Metadata common to all Datastore Admin operations." + }, + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of which entities are being imported." + }, + "inputUrl": { + "description": "The location of the import metadata file. This will be the same value as\nthe google.datastore.admin.v1.ExportEntitiesResponse.output_url field.", + "type": "string" + }, + "progressBytes": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of bytes processed." + }, + "progressEntities": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of entities processed." 
+ } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1Progress": { + "description": "Measures the progress of a particular metric.", + "id": "GoogleDatastoreAdminV1Progress", + "properties": { + "workCompleted": { + "description": "The amount of work that has been completed. Note that this may be greater\nthan work_estimated.", + "format": "int64", + "type": "string" + }, + "workEstimated": { + "description": "An estimate of how much work needs to be performed. May be zero if the\nwork estimate is unavailable.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "GoogleDatastoreAdminV1beta1CommonMetadata": { "description": "Metadata common to all Datastore Admin operations.", "id": "GoogleDatastoreAdminV1beta1CommonMetadata", diff --git a/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go index 33ed9e909..4de19ecc4 100644 --- a/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1beta1/datastore-gen.go @@ -87,6 +87,297 @@ type ProjectsService struct { s *Service } +// GoogleDatastoreAdminV1CommonMetadata: Metadata common to all +// Datastore Admin operations. +type GoogleDatastoreAdminV1CommonMetadata struct { + // EndTime: The time the operation ended, either successfully or + // otherwise. + EndTime string `json:"endTime,omitempty"` + + // Labels: The client-assigned labels which were provided when the + // operation was + // created. May also include additional labels. + Labels map[string]string `json:"labels,omitempty"` + + // OperationType: The type of the operation. Can be used as a filter + // in + // ListOperationsRequest. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Unspecified. + // "EXPORT_ENTITIES" - ExportEntities. + // "IMPORT_ENTITIES" - ImportEntities. + OperationType string `json:"operationType,omitempty"` + + // StartTime: The time that work began on the operation. 
+ StartTime string `json:"startTime,omitempty"` + + // State: The current state of the Operation. + // + // Possible values: + // "STATE_UNSPECIFIED" - Unspecified. + // "INITIALIZING" - Request is being prepared for processing. + // "PROCESSING" - Request is actively being processed. + // "CANCELLING" - Request is in the process of being cancelled after + // user called + // google.longrunning.Operations.CancelOperation on the operation. + // "FINALIZING" - Request has been processed and is in its + // finalization stage. + // "SUCCESSFUL" - Request has completed successfully. + // "FAILED" - Request has finished being processed, but encountered an + // error. + // "CANCELLED" - Request has finished being cancelled after user + // called + // google.longrunning.Operations.CancelOperation. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1CommonMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1CommonMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1EntityFilter: Identifies a subset of entities +// in a project. This is specified as +// combinations of kinds and namespaces (either or both of which may be +// all, as +// described in the following examples). +// Example usage: +// +// Entire project: +// kinds=[], namespace_ids=[] +// +// Kinds Foo and Bar in all namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=[] +// +// Kinds Foo and Bar only in the default namespace: +// kinds=['Foo', 'Bar'], namespace_ids=[''] +// +// Kinds Foo and Bar in both the default and Baz namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz'] +// +// The entire Baz namespace: +// kinds=[], namespace_ids=['Baz'] +type GoogleDatastoreAdminV1EntityFilter struct { + // Kinds: If empty, then this represents all kinds. + Kinds []string `json:"kinds,omitempty"` + + // NamespaceIds: An empty list represents all namespaces. This is the + // preferred + // usage for projects that don't use namespaces. + // + // An empty string element represents the default namespace. This should + // be + // used if the project has data in non-default namespaces, but doesn't + // want to + // include them. + // Each namespace in this list must be unique. + NamespaceIds []string `json:"namespaceIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kinds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kinds") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1EntityFilter) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1EntityFilter + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesMetadata: Metadata for +// ExportEntities operations. +type GoogleDatastoreAdminV1ExportEntitiesMetadata struct { + // Common: Metadata common to all Datastore Admin operations. + Common *GoogleDatastoreAdminV1CommonMetadata `json:"common,omitempty"` + + // EntityFilter: Description of which entities are being exported. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // OutputUrlPrefix: Location for the export metadata and data files. + // This will be the same + // value as + // the + // google.datastore.admin.v1.ExportEntitiesRequest.output_url_prefix + // + // field. The final output location is provided + // in + // google.datastore.admin.v1.ExportEntitiesResponse.output_url. + OutputUrlPrefix string `json:"outputUrlPrefix,omitempty"` + + // ProgressBytes: An estimate of the number of bytes processed. + ProgressBytes *GoogleDatastoreAdminV1Progress `json:"progressBytes,omitempty"` + + // ProgressEntities: An estimate of the number of entities processed. + ProgressEntities *GoogleDatastoreAdminV1Progress `json:"progressEntities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesResponse: The response +// for +// google.datastore.admin.v1.DatastoreAdmin.ExportEntities. +type GoogleDatastoreAdminV1ExportEntitiesResponse struct { + // OutputUrl: Location of the output metadata file. This can be used to + // begin an import + // into Cloud Datastore (this project or another project). + // See + // google.datastore.admin.v1.ImportEntitiesRequest.input_url. + // Only present if the operation completed successfully. + OutputUrl string `json:"outputUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OutputUrl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OutputUrl") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ImportEntitiesMetadata: Metadata for +// ImportEntities operations. +type GoogleDatastoreAdminV1ImportEntitiesMetadata struct { + // Common: Metadata common to all Datastore Admin operations. + Common *GoogleDatastoreAdminV1CommonMetadata `json:"common,omitempty"` + + // EntityFilter: Description of which entities are being imported. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // InputUrl: The location of the import metadata file. This will be the + // same value as + // the google.datastore.admin.v1.ExportEntitiesResponse.output_url + // field. + InputUrl string `json:"inputUrl,omitempty"` + + // ProgressBytes: An estimate of the number of bytes processed. + ProgressBytes *GoogleDatastoreAdminV1Progress `json:"progressBytes,omitempty"` + + // ProgressEntities: An estimate of the number of entities processed. + ProgressEntities *GoogleDatastoreAdminV1Progress `json:"progressEntities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ImportEntitiesMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ImportEntitiesMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1Progress: Measures the progress of a particular +// metric. +type GoogleDatastoreAdminV1Progress struct { + // WorkCompleted: The amount of work that has been completed. Note that + // this may be greater + // than work_estimated. + WorkCompleted int64 `json:"workCompleted,omitempty,string"` + + // WorkEstimated: An estimate of how much work needs to be performed. + // May be zero if the + // work estimate is unavailable. + WorkEstimated int64 `json:"workEstimated,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "WorkCompleted") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"WorkCompleted") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1Progress) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1Progress + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleDatastoreAdminV1beta1CommonMetadata: Metadata common to all // Datastore Admin operations. type GoogleDatastoreAdminV1beta1CommonMetadata struct { diff --git a/vendor/google.golang.org/api/datastore/v1beta3/datastore-api.json b/vendor/google.golang.org/api/datastore/v1beta3/datastore-api.json index 91de08b4e..513795645 100644 --- a/vendor/google.golang.org/api/datastore/v1beta3/datastore-api.json +++ b/vendor/google.golang.org/api/datastore/v1beta3/datastore-api.json @@ -317,7 +317,7 @@ } } }, - "revision": "20180117", + "revision": "20180321", "rootUrl": "https://datastore.googleapis.com/", "schemas": { "AllocateIdsRequest": { @@ -517,6 +517,171 @@ }, "type": "object" }, + "GoogleDatastoreAdminV1CommonMetadata": { + "description": "Metadata common to all Datastore Admin operations.", + "id": "GoogleDatastoreAdminV1CommonMetadata", + "properties": { + "endTime": { + "description": "The time the operation ended, either successfully or otherwise.", + "format": "google-datetime", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "The client-assigned labels which were provided when the operation was\ncreated. May also include additional labels.", + "type": "object" + }, + "operationType": { + "description": "The type of the operation. 
Can be used as a filter in\nListOperationsRequest.", + "enum": [ + "OPERATION_TYPE_UNSPECIFIED", + "EXPORT_ENTITIES", + "IMPORT_ENTITIES" + ], + "enumDescriptions": [ + "Unspecified.", + "ExportEntities.", + "ImportEntities." + ], + "type": "string" + }, + "startTime": { + "description": "The time that work began on the operation.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "The current state of the Operation.", + "enum": [ + "STATE_UNSPECIFIED", + "INITIALIZING", + "PROCESSING", + "CANCELLING", + "FINALIZING", + "SUCCESSFUL", + "FAILED", + "CANCELLED" + ], + "enumDescriptions": [ + "Unspecified.", + "Request is being prepared for processing.", + "Request is actively being processed.", + "Request is in the process of being cancelled after user called\ngoogle.longrunning.Operations.CancelOperation on the operation.", + "Request has been processed and is in its finalization stage.", + "Request has completed successfully.", + "Request has finished being processed, but encountered an error.", + "Request has finished being cancelled after user called\ngoogle.longrunning.Operations.CancelOperation." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1EntityFilter": { + "description": "Identifies a subset of entities in a project. 
This is specified as\ncombinations of kinds and namespaces (either or both of which may be all, as\ndescribed in the following examples).\nExample usage:\n\nEntire project:\n kinds=[], namespace_ids=[]\n\nKinds Foo and Bar in all namespaces:\n kinds=['Foo', 'Bar'], namespace_ids=[]\n\nKinds Foo and Bar only in the default namespace:\n kinds=['Foo', 'Bar'], namespace_ids=['']\n\nKinds Foo and Bar in both the default and Baz namespaces:\n kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz']\n\nThe entire Baz namespace:\n kinds=[], namespace_ids=['Baz']", + "id": "GoogleDatastoreAdminV1EntityFilter", + "properties": { + "kinds": { + "description": "If empty, then this represents all kinds.", + "items": { + "type": "string" + }, + "type": "array" + }, + "namespaceIds": { + "description": "An empty list represents all namespaces. This is the preferred\nusage for projects that don't use namespaces.\n\nAn empty string element represents the default namespace. This should be\nused if the project has data in non-default namespaces, but doesn't want to\ninclude them.\nEach namespace in this list must be unique.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesMetadata": { + "description": "Metadata for ExportEntities operations.", + "id": "GoogleDatastoreAdminV1ExportEntitiesMetadata", + "properties": { + "common": { + "$ref": "GoogleDatastoreAdminV1CommonMetadata", + "description": "Metadata common to all Datastore Admin operations." + }, + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of which entities are being exported." + }, + "outputUrlPrefix": { + "description": "Location for the export metadata and data files. This will be the same\nvalue as the\ngoogle.datastore.admin.v1.ExportEntitiesRequest.output_url_prefix\nfield. 
The final output location is provided in\ngoogle.datastore.admin.v1.ExportEntitiesResponse.output_url.", + "type": "string" + }, + "progressBytes": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of bytes processed." + }, + "progressEntities": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of entities processed." + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ExportEntitiesResponse": { + "description": "The response for\ngoogle.datastore.admin.v1.DatastoreAdmin.ExportEntities.", + "id": "GoogleDatastoreAdminV1ExportEntitiesResponse", + "properties": { + "outputUrl": { + "description": "Location of the output metadata file. This can be used to begin an import\ninto Cloud Datastore (this project or another project). See\ngoogle.datastore.admin.v1.ImportEntitiesRequest.input_url.\nOnly present if the operation completed successfully.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1ImportEntitiesMetadata": { + "description": "Metadata for ImportEntities operations.", + "id": "GoogleDatastoreAdminV1ImportEntitiesMetadata", + "properties": { + "common": { + "$ref": "GoogleDatastoreAdminV1CommonMetadata", + "description": "Metadata common to all Datastore Admin operations." + }, + "entityFilter": { + "$ref": "GoogleDatastoreAdminV1EntityFilter", + "description": "Description of which entities are being imported." + }, + "inputUrl": { + "description": "The location of the import metadata file. This will be the same value as\nthe google.datastore.admin.v1.ExportEntitiesResponse.output_url field.", + "type": "string" + }, + "progressBytes": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of bytes processed." + }, + "progressEntities": { + "$ref": "GoogleDatastoreAdminV1Progress", + "description": "An estimate of the number of entities processed." 
+ } + }, + "type": "object" + }, + "GoogleDatastoreAdminV1Progress": { + "description": "Measures the progress of a particular metric.", + "id": "GoogleDatastoreAdminV1Progress", + "properties": { + "workCompleted": { + "description": "The amount of work that has been completed. Note that this may be greater\nthan work_estimated.", + "format": "int64", + "type": "string" + }, + "workEstimated": { + "description": "An estimate of how much work needs to be performed. May be zero if the\nwork estimate is unavailable.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "GoogleDatastoreAdminV1beta1CommonMetadata": { "description": "Metadata common to all Datastore Admin operations.", "id": "GoogleDatastoreAdminV1beta1CommonMetadata", diff --git a/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go b/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go index fa5c7e939..0fb05f8eb 100644 --- a/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go +++ b/vendor/google.golang.org/api/datastore/v1beta3/datastore-gen.go @@ -511,6 +511,297 @@ func (s *Filter) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleDatastoreAdminV1CommonMetadata: Metadata common to all +// Datastore Admin operations. +type GoogleDatastoreAdminV1CommonMetadata struct { + // EndTime: The time the operation ended, either successfully or + // otherwise. + EndTime string `json:"endTime,omitempty"` + + // Labels: The client-assigned labels which were provided when the + // operation was + // created. May also include additional labels. + Labels map[string]string `json:"labels,omitempty"` + + // OperationType: The type of the operation. Can be used as a filter + // in + // ListOperationsRequest. + // + // Possible values: + // "OPERATION_TYPE_UNSPECIFIED" - Unspecified. + // "EXPORT_ENTITIES" - ExportEntities. + // "IMPORT_ENTITIES" - ImportEntities. 
+ OperationType string `json:"operationType,omitempty"` + + // StartTime: The time that work began on the operation. + StartTime string `json:"startTime,omitempty"` + + // State: The current state of the Operation. + // + // Possible values: + // "STATE_UNSPECIFIED" - Unspecified. + // "INITIALIZING" - Request is being prepared for processing. + // "PROCESSING" - Request is actively being processed. + // "CANCELLING" - Request is in the process of being cancelled after + // user called + // google.longrunning.Operations.CancelOperation on the operation. + // "FINALIZING" - Request has been processed and is in its + // finalization stage. + // "SUCCESSFUL" - Request has completed successfully. + // "FAILED" - Request has finished being processed, but encountered an + // error. + // "CANCELLED" - Request has finished being cancelled after user + // called + // google.longrunning.Operations.CancelOperation. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EndTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EndTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1CommonMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1CommonMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1EntityFilter: Identifies a subset of entities +// in a project. This is specified as +// combinations of kinds and namespaces (either or both of which may be +// all, as +// described in the following examples). +// Example usage: +// +// Entire project: +// kinds=[], namespace_ids=[] +// +// Kinds Foo and Bar in all namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=[] +// +// Kinds Foo and Bar only in the default namespace: +// kinds=['Foo', 'Bar'], namespace_ids=[''] +// +// Kinds Foo and Bar in both the default and Baz namespaces: +// kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz'] +// +// The entire Baz namespace: +// kinds=[], namespace_ids=['Baz'] +type GoogleDatastoreAdminV1EntityFilter struct { + // Kinds: If empty, then this represents all kinds. + Kinds []string `json:"kinds,omitempty"` + + // NamespaceIds: An empty list represents all namespaces. This is the + // preferred + // usage for projects that don't use namespaces. + // + // An empty string element represents the default namespace. This should + // be + // used if the project has data in non-default namespaces, but doesn't + // want to + // include them. + // Each namespace in this list must be unique. + NamespaceIds []string `json:"namespaceIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kinds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kinds") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1EntityFilter) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1EntityFilter + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesMetadata: Metadata for +// ExportEntities operations. +type GoogleDatastoreAdminV1ExportEntitiesMetadata struct { + // Common: Metadata common to all Datastore Admin operations. + Common *GoogleDatastoreAdminV1CommonMetadata `json:"common,omitempty"` + + // EntityFilter: Description of which entities are being exported. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // OutputUrlPrefix: Location for the export metadata and data files. + // This will be the same + // value as + // the + // google.datastore.admin.v1.ExportEntitiesRequest.output_url_prefix + // + // field. The final output location is provided + // in + // google.datastore.admin.v1.ExportEntitiesResponse.output_url. + OutputUrlPrefix string `json:"outputUrlPrefix,omitempty"` + + // ProgressBytes: An estimate of the number of bytes processed. + ProgressBytes *GoogleDatastoreAdminV1Progress `json:"progressBytes,omitempty"` + + // ProgressEntities: An estimate of the number of entities processed. + ProgressEntities *GoogleDatastoreAdminV1Progress `json:"progressEntities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ExportEntitiesResponse: The response +// for +// google.datastore.admin.v1.DatastoreAdmin.ExportEntities. +type GoogleDatastoreAdminV1ExportEntitiesResponse struct { + // OutputUrl: Location of the output metadata file. This can be used to + // begin an import + // into Cloud Datastore (this project or another project). + // See + // google.datastore.admin.v1.ImportEntitiesRequest.input_url. + // Only present if the operation completed successfully. + OutputUrl string `json:"outputUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OutputUrl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OutputUrl") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ExportEntitiesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ExportEntitiesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1ImportEntitiesMetadata: Metadata for +// ImportEntities operations. +type GoogleDatastoreAdminV1ImportEntitiesMetadata struct { + // Common: Metadata common to all Datastore Admin operations. + Common *GoogleDatastoreAdminV1CommonMetadata `json:"common,omitempty"` + + // EntityFilter: Description of which entities are being imported. + EntityFilter *GoogleDatastoreAdminV1EntityFilter `json:"entityFilter,omitempty"` + + // InputUrl: The location of the import metadata file. This will be the + // same value as + // the google.datastore.admin.v1.ExportEntitiesResponse.output_url + // field. + InputUrl string `json:"inputUrl,omitempty"` + + // ProgressBytes: An estimate of the number of bytes processed. + ProgressBytes *GoogleDatastoreAdminV1Progress `json:"progressBytes,omitempty"` + + // ProgressEntities: An estimate of the number of entities processed. + ProgressEntities *GoogleDatastoreAdminV1Progress `json:"progressEntities,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Common") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Common") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1ImportEntitiesMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1ImportEntitiesMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleDatastoreAdminV1Progress: Measures the progress of a particular +// metric. +type GoogleDatastoreAdminV1Progress struct { + // WorkCompleted: The amount of work that has been completed. Note that + // this may be greater + // than work_estimated. + WorkCompleted int64 `json:"workCompleted,omitempty,string"` + + // WorkEstimated: An estimate of how much work needs to be performed. + // May be zero if the + // work estimate is unavailable. + WorkEstimated int64 `json:"workEstimated,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "WorkCompleted") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"WorkCompleted") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleDatastoreAdminV1Progress) MarshalJSON() ([]byte, error) { + type NoMethod GoogleDatastoreAdminV1Progress + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // GoogleDatastoreAdminV1beta1CommonMetadata: Metadata common to all // Datastore Admin operations. type GoogleDatastoreAdminV1beta1CommonMetadata struct { diff --git a/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-api.json b/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-api.json index f8a056a21..bde75f9c7 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-api.json +++ b/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-api.json @@ -24,7 +24,7 @@ "description": "The Deployment Manager API allows users to declaratively configure, deploy and run complex solutions on the Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/deployment-manager/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/6qPLaFfAvg8ZGixVZj1YhdDR2WQ\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/01WYREYbsLfwRWDJQpCucMHT0XA\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -367,7 +367,6 @@ "deployment": { "description": "The name of the deployment for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -466,7 +465,7 @@ 
"parameters": { "createPolicy": { "default": "CREATE_OR_ACQUIRE", - "description": "", + "description": "Sets the policy to use for creating new resources.", "enum": [ "ACQUIRE", "CREATE", @@ -1651,7 +1650,7 @@ } } }, - "revision": "20180222", + "revision": "20180309", "rootUrl": "https://www.googleapis.com/", "schemas": { "AsyncOptions": { diff --git a/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-gen.go b/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-gen.go index e15bc3f83..f2bf91725 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-gen.go +++ b/vendor/google.golang.org/api/deploymentmanager/v0.alpha/deploymentmanager-gen.go @@ -4323,7 +4323,6 @@ func (c *DeploymentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "deployment": { // "description": "The name of the deployment for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -4670,7 +4669,8 @@ func (r *DeploymentsService) Insert(project string, deployment *Deployment) *Dep return c } -// CreatePolicy sets the optional parameter "createPolicy": +// CreatePolicy sets the optional parameter "createPolicy": Sets the +// policy to use for creating new resources. 
// // Possible values: // "ACQUIRE" @@ -4790,7 +4790,7 @@ func (c *DeploymentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "parameters": { // "createPolicy": { // "default": "CREATE_OR_ACQUIRE", - // "description": "", + // "description": "Sets the policy to use for creating new resources.", // "enum": [ // "ACQUIRE", // "CREATE", diff --git a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json index 3f69988bc..d54647e36 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json +++ b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-api.json @@ -24,7 +24,7 @@ "description": "Declares, configures, and deploys complex solutions on Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/deployment-manager/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/L_ygoCRkmtwA6-Mj5fjEetgu-hQ\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/e3d_k3vYLbSd_vluuySMj1lei3Y\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -145,7 +145,6 @@ "deployment": { "description": "The name of the deployment for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -242,6 +241,20 @@ "project" ], "parameters": { + "createPolicy": { + "default": "CREATE_OR_ACQUIRE", + "description": "Sets the policy to use for creating new resources.", + "enum": [ + "ACQUIRE", + "CREATE_OR_ACQUIRE" + ], + "enumDescriptions": [ + "", + "" + ], + "location": "query", + "type": "string" + }, "preview": { "description": "If set to true, creates a deployment and creates \"shell\" resources but does not actually instantiate these resources. This allows you to preview what your deployment looks like. 
After previewing a deployment, you can deploy your resources by making a request with the update() method or you can use the cancelPreview() method to cancel the preview altogether. Note that the deployment will still exist after you cancel the preview and you must separately delete this deployment if you want to remove it.", "location": "query", @@ -919,7 +932,7 @@ } } }, - "revision": "20180222", + "revision": "20180309", "rootUrl": "https://www.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go index 750245102..b96d4b5a0 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go +++ b/vendor/google.golang.org/api/deploymentmanager/v2/deploymentmanager-gen.go @@ -2349,7 +2349,6 @@ func (c *DeploymentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "deployment": { // "description": "The name of the deployment for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -2696,6 +2695,17 @@ func (r *DeploymentsService) Insert(project string, deployment *Deployment) *Dep return c } +// CreatePolicy sets the optional parameter "createPolicy": Sets the +// policy to use for creating new resources. +// +// Possible values: +// "ACQUIRE" +// "CREATE_OR_ACQUIRE" (default) +func (c *DeploymentsInsertCall) CreatePolicy(createPolicy string) *DeploymentsInsertCall { + c.urlParams_.Set("createPolicy", createPolicy) + return c +} + // Preview sets the optional parameter "preview": If set to true, // creates a deployment and creates "shell" resources but does not // actually instantiate these resources. 
This allows you to preview what @@ -2803,6 +2813,20 @@ func (c *DeploymentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "project" // ], // "parameters": { + // "createPolicy": { + // "default": "CREATE_OR_ACQUIRE", + // "description": "Sets the policy to use for creating new resources.", + // "enum": [ + // "ACQUIRE", + // "CREATE_OR_ACQUIRE" + // ], + // "enumDescriptions": [ + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // }, // "preview": { // "description": "If set to true, creates a deployment and creates \"shell\" resources but does not actually instantiate these resources. This allows you to preview what your deployment looks like. After previewing a deployment, you can deploy your resources by making a request with the update() method or you can use the cancelPreview() method to cancel the preview altogether. Note that the deployment will still exist after you cancel the preview and you must separately delete this deployment if you want to remove it.", // "location": "query", diff --git a/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-api.json b/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-api.json index 89b1fb0aa..df2169872 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-api.json +++ b/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-api.json @@ -24,7 +24,7 @@ "description": "The Deployment Manager API allows users to declaratively configure, deploy and run complex solutions on the Google Cloud Platform.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/deployment-manager/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/FFcZK58zFukKUPKJPqIblWvO068\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/Y2xp_qL157IVoOjU4Jvc6XPGf28\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": 
"https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -367,7 +367,6 @@ "deployment": { "description": "The name of the deployment for this request.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -466,7 +465,7 @@ "parameters": { "createPolicy": { "default": "CREATE_OR_ACQUIRE", - "description": "", + "description": "Sets the policy to use for creating new resources.", "enum": [ "ACQUIRE", "CREATE", @@ -1483,7 +1482,7 @@ } } }, - "revision": "20180222", + "revision": "20180309", "rootUrl": "https://www.googleapis.com/", "schemas": { "AsyncOptions": { diff --git a/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-gen.go b/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-gen.go index 00f31a8b0..61a38977b 100644 --- a/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-gen.go +++ b/vendor/google.golang.org/api/deploymentmanager/v2beta/deploymentmanager-gen.go @@ -4216,7 +4216,6 @@ func (c *DeploymentsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er // "deployment": { // "description": "The name of the deployment for this request.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -4563,7 +4562,8 @@ func (r *DeploymentsService) Insert(project string, deployment *Deployment) *Dep return c } -// CreatePolicy sets the optional parameter "createPolicy": +// CreatePolicy sets the optional parameter "createPolicy": Sets the +// policy to use for creating new resources. 
// // Possible values: // "ACQUIRE" @@ -4683,7 +4683,7 @@ func (c *DeploymentsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er // "parameters": { // "createPolicy": { // "default": "CREATE_OR_ACQUIRE", - // "description": "", + // "description": "Sets the policy to use for creating new resources.", // "enum": [ // "ACQUIRE", // "CREATE", diff --git a/vendor/google.golang.org/api/dialogflow/v2/dialogflow-api.json b/vendor/google.golang.org/api/dialogflow/v2/dialogflow-api.json new file mode 100644 index 000000000..7f7868f2f --- /dev/null +++ b/vendor/google.golang.org/api/dialogflow/v2/dialogflow-api.json @@ -0,0 +1,4250 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "basePath": "", + "baseUrl": "https://dialogflow.googleapis.com/", + "batchPath": "batch", + "canonicalName": "Dialogflow", + "description": "An end-to-end development suite for conversational interfaces (e.g., chatbots, voice-powered apps and devices).", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/dialogflow-enterprise/", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "dialogflow:v2", + "kind": "discovery#restDescription", + "name": "dialogflow", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + 
"enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "pp": { + "default": "true", + "description": "Pretty-print response.", + "location": "query", + "type": "boolean" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. 
\"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "projects": { + "methods": { + "getAgent": { + "description": "Retrieves the specified agent.", + "flatPath": "v2/projects/{projectsId}/agent", + "httpMethod": "GET", + "id": "dialogflow.projects.getAgent", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The project that the agent to fetch is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/agent", + "response": { + "$ref": "GoogleCloudDialogflowV2Agent" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "agent": { + "methods": { + "export": { + "description": "Exports the specified agent to a ZIP file.\n\n\nOperation \u003cresponse: ExportAgentResponse,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent:export", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.export", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. 
The project that the agent to export is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/agent:export", + "request": { + "$ref": "GoogleCloudDialogflowV2ExportAgentRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "import": { + "description": "Imports the specified agent from a ZIP file.\n\nUploads new intents and entity types without deleting the existing ones.\nIntents and entity types with the same name are replaced with the new\nversions from ImportAgentRequest.\n\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent:import", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.import", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The project that the agent to import is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/agent:import", + "request": { + "$ref": "GoogleCloudDialogflowV2ImportAgentRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "restore": { + "description": "Restores the specified agent from a ZIP file.\n\nReplaces the current agent version with a new one. 
All the intents and\nentity types in the older version are deleted.\n\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent:restore", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.restore", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The project that the agent to restore is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/agent:restore", + "request": { + "$ref": "GoogleCloudDialogflowV2RestoreAgentRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "search": { + "description": "Returns the list of agents.\n\nSince there is at most one conversational agent per project, this method is\nuseful primarily for listing all agents across projects the caller has\naccess to. One can achieve that with a wildcard project collection id \"-\".\nRefer to [List\nSub-Collections](https://cloud.google.com/apis/design/design_patterns#list_sub-collections).", + "flatPath": "v2/projects/{projectsId}/agent:search", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.search", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The project to list agents from.\nFormat: `projects/\u003cProject ID or '-'\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/agent:search", + "response": { + "$ref": "GoogleCloudDialogflowV2SearchAgentsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "train": { + "description": "Trains the specified agent.\n\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent:train", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.train", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The project that the agent to train is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/agent:train", + "request": { + "$ref": "GoogleCloudDialogflowV2TrainAgentRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "entityTypes": { + "methods": { + "batchDelete": { + "description": "Deletes entity types in the specified agent.\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes:batchDelete", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.entityTypes.batchDelete", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the agent to delete all entities types for. 
Format:\n`projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes:batchDelete", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "batchUpdate": { + "description": "Updates/Creates multiple entity types in the specified agent.\n\nOperation \u003cresponse: BatchUpdateEntityTypesResponse,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes:batchUpdate", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.entityTypes.batchUpdate", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the agent to update or create entity types in.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes:batchUpdate", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "description": "Creates an entity type in the specified agent.", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.entityTypes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "languageCode": { + "description": "Optional. The language of entity synonyms defined in `entity_type`. 
If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The agent to create a entity type for.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes", + "request": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the specified entity type.", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.entityTypes.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the entity type to delete.\nFormat: `projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntityType ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the specified entity type.", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.entityTypes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "languageCode": { + "description": "Optional. The language to retrieve entity synonyms for. 
If not specified,\nthe agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The name of the entity type.\nFormat: `projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntityType ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Returns the list of all entity types in the specified agent.", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.entityTypes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "languageCode": { + "description": "Optional. The language to list entity synonyms for. If not specified,\nthe agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The agent to list all entity types from.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes", + "response": { + "$ref": "GoogleCloudDialogflowV2ListEntityTypesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the specified entity type.", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}", + "httpMethod": "PATCH", + "id": "dialogflow.projects.agent.entityTypes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "languageCode": { + "description": "Optional. The language of entity synonyms defined in `entity_type`. If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of the entity type. Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "entities": { + "methods": { + "batchCreate": { + "description": "Creates multiple new entities in the specified entity type (extends the\nexisting collection of entries).\n\nOperation \u003cresponse: google.protobuf.Empty\u003e", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}/entities:batchCreate", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.entityTypes.entities.batchCreate", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the entity type to create entities in. Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entities:batchCreate", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchCreateEntitiesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "batchDelete": { + "description": "Deletes entities in the specified entity type.\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}/entities:batchDelete", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.entityTypes.entities.batchDelete", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the entity type to delete entries for. 
Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entities:batchDelete", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchDeleteEntitiesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "batchUpdate": { + "description": "Updates entities in the specified entity type (replaces the existing\ncollection of entries).\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}/entities:batchUpdate", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.entityTypes.entities.batchUpdate", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the entity type to update the entities in. Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entities:batchUpdate", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchUpdateEntitiesRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "intents": { + "methods": { + "batchDelete": { + "description": "Deletes intents in the specified agent.\n\nOperation \u003cresponse: google.protobuf.Empty\u003e", + "flatPath": "v2/projects/{projectsId}/agent/intents:batchDelete", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.intents.batchDelete", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. 
The name of the agent to delete all entities types for. Format:\n`projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/intents:batchDelete", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchDeleteIntentsRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "batchUpdate": { + "description": "Updates/Creates multiple intents in the specified agent.\n\nOperation \u003cresponse: BatchUpdateIntentsResponse\u003e", + "flatPath": "v2/projects/{projectsId}/agent/intents:batchUpdate", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.intents.batchUpdate", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the agent to update or create intents in.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/intents:batchUpdate", + "request": { + "$ref": "GoogleCloudDialogflowV2BatchUpdateIntentsRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "description": "Creates an intent in the specified agent.", + "flatPath": "v2/projects/{projectsId}/agent/intents", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.intents.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "intentView": { + "description": "Optional. The resource view to apply to the returned intent.", + "enum": [ + "INTENT_VIEW_UNSPECIFIED", + "INTENT_VIEW_FULL" + ], + "location": "query", + "type": "string" + }, + "languageCode": { + "description": "Optional. The language of training phrases, parameters and rich messages\ndefined in `intent`. 
If not specified, the agent's default language is\nused. [More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The agent to create a intent for.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/intents", + "request": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the specified intent.", + "flatPath": "v2/projects/{projectsId}/agent/intents/{intentsId}", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.intents.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the intent to delete.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/intents/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the specified intent.", + "flatPath": "v2/projects/{projectsId}/agent/intents/{intentsId}", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.intents.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "intentView": { + "description": "Optional. The resource view to apply to the returned intent.", + "enum": [ + "INTENT_VIEW_UNSPECIFIED", + "INTENT_VIEW_FULL" + ], + "location": "query", + "type": "string" + }, + "languageCode": { + "description": "Optional. 
The language to retrieve training phrases, parameters and rich\nmessages for. If not specified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required. The name of the intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/intents/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Returns the list of all intents in the specified agent.", + "flatPath": "v2/projects/{projectsId}/agent/intents", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.intents.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "intentView": { + "description": "Optional. The resource view to apply to the returned intent.", + "enum": [ + "INTENT_VIEW_UNSPECIFIED", + "INTENT_VIEW_FULL" + ], + "location": "query", + "type": "string" + }, + "languageCode": { + "description": "Optional. The language to list training phrases, parameters and rich\nmessages for. If not specified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent before they can be used.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. 
The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The agent to list all intents from.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + "location": "path", + "pattern": "^projects/[^/]+/agent$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/intents", + "response": { + "$ref": "GoogleCloudDialogflowV2ListIntentsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the specified intent.", + "flatPath": "v2/projects/{projectsId}/agent/intents/{intentsId}", + "httpMethod": "PATCH", + "id": "dialogflow.projects.agent.intents.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "intentView": { + "description": "Optional. The resource view to apply to the returned intent.", + "enum": [ + "INTENT_VIEW_UNSPECIFIED", + "INTENT_VIEW_FULL" + ], + "location": "query", + "type": "string" + }, + "languageCode": { + "description": "Optional. The language of training phrases, parameters and rich messages\ndefined in `intent`. If not specified, the agent's default language is\nused. [More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of this intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "location": "path", + "pattern": "^projects/[^/]+/agent/intents/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "runtimes": { + "resources": { + "sessions": { + "methods": { + "deleteContexts": { + "description": "Deletes all active contexts in the specified session.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.runtimes.sessions.deleteContexts", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the session to delete all contexts from. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or `projects/\u003cProject\nID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`. Note: Runtimes are\nunder construction and will be available soon. If \u003cRuntime ID\u003e is not\nspecified we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/contexts", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "detectIntent": { + "description": "Processes a natural language query and returns structured, actionable data\nas a result. 
This method is not idempotent, because it may cause contexts\nand session entity types to be updated, which in turn might affect\nresults of future queries.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}:detectIntent", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.runtimes.sessions.detectIntent", + "parameterOrder": [ + "session" + ], + "parameters": { + "session": { + "description": "Required. The name of the session this query is sent to. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.\nIt's up to the API caller to choose an appropriate session ID. It can be\na random number or some type of user identifier (preferably hashed).\nThe length of the session ID must not exceed 36 bytes.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+session}:detectIntent", + "request": { + "$ref": "GoogleCloudDialogflowV2DetectIntentRequest" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2DetectIntentResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "contexts": { + "methods": { + "create": { + "description": "Creates a context.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.runtimes.sessions.contexts.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. 
The session to create a context for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/contexts", + "request": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the specified context.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts/{contextsId}", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.runtimes.sessions.contexts.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the context to delete. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. 
If \u003cRuntime ID\u003e is not specified, we assume default\n'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/contexts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the specified context.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts/{contextsId}", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.runtimes.sessions.contexts.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the context. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. If \u003cRuntime ID\u003e is not specified, we assume default\n'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/contexts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Returns the list of all contexts in the specified session.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.runtimes.sessions.contexts.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of items to return in a single page. 
By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The session to list all contexts from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/contexts", + "response": { + "$ref": "GoogleCloudDialogflowV2ListContextsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the specified context.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts/{contextsId}", + "httpMethod": "PATCH", + "id": "dialogflow.projects.agent.runtimes.sessions.contexts.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The unique identifier of the context. 
Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`,\nor\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nThe Context ID is always converted to lowercase.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/contexts/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. The mask to control which fields get updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "entityTypes": { + "methods": { + "create": { + "description": "Creates a session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. 
The session to create a session entity type for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes", + "request": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the specified session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes/{entityTypesId}", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the entity type to delete. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e` or `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime\nID\u003e/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`. Note:\nRuntimes are under construction and will be available soon. 
If \u003cRuntime ID\u003e\nis not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the specified session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes/{entityTypesId}", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the session entity type. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e` or `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime\nID\u003e/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`. Note:\nRuntimes are under construction and will be available soon. If \u003cRuntime ID\u003e\nis not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Returns the list of all session entity types in the specified session.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. 
The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. The session to list all session entity types from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes", + "response": { + "$ref": "GoogleCloudDialogflowV2ListSessionEntityTypesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the specified session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes/{entityTypesId}", + "httpMethod": "PATCH", + "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The unique identifier of this session entity type. 
Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003esessions/\u003cSession\nID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/entityTypes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. The mask to control which fields get updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + }, + "sessions": { + "methods": { + "deleteContexts": { + "description": "Deletes all active contexts in the specified session.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.sessions.deleteContexts", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The name of the session to delete all contexts from. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or `projects/\u003cProject\nID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`. Note: Runtimes are\nunder construction and will be available soon. 
If \u003cRuntime ID\u003e is not\nspecified we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/contexts", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "detectIntent": { + "description": "Processes a natural language query and returns structured, actionable data\nas a result. This method is not idempotent, because it may cause contexts\nand session entity types to be updated, which in turn might affect\nresults of future queries.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}:detectIntent", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.sessions.detectIntent", + "parameterOrder": [ + "session" + ], + "parameters": { + "session": { + "description": "Required. The name of the session this query is sent to. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.\nIt's up to the API caller to choose an appropriate session ID. 
It can be\na random number or some type of user identifier (preferably hashed).\nThe length of the session ID must not exceed 36 bytes.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+session}:detectIntent", + "request": { + "$ref": "GoogleCloudDialogflowV2DetectIntentRequest" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2DetectIntentResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + }, + "resources": { + "contexts": { + "methods": { + "create": { + "description": "Creates a context.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.sessions.contexts.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The session to create a context for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/contexts", + "request": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the specified context.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts/{contextsId}", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.sessions.contexts.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. 
The name of the context to delete. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. If \u003cRuntime ID\u003e is not specified, we assume default\n'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+/contexts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the specified context.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts/{contextsId}", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.sessions.contexts.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the context. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. 
If \u003cRuntime ID\u003e is not specified, we assume default\n'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+/contexts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Returns the list of all contexts in the specified session.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.sessions.contexts.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The session to list all contexts from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/contexts", + "response": { + "$ref": "GoogleCloudDialogflowV2ListContextsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the specified context.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts/{contextsId}", + "httpMethod": "PATCH", + "id": "dialogflow.projects.agent.sessions.contexts.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The unique identifier of the context. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`,\nor\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nThe Context ID is always converted to lowercase.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+/contexts/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "entityTypes": { + "methods": { + "create": { + "description": "Creates a session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes", + "httpMethod": "POST", + "id": "dialogflow.projects.agent.sessions.entityTypes.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "Required. The session to create a session entity type for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes", + "request": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes the specified session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes/{entityTypesId}", + "httpMethod": "DELETE", + "id": "dialogflow.projects.agent.sessions.entityTypes.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the entity type to delete. 
Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e` or `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime\nID\u003e/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`. Note:\nRuntimes are under construction and will be available soon. If \u003cRuntime ID\u003e\nis not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Retrieves the specified session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes/{entityTypesId}", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.sessions.entityTypes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The name of the session entity type. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e` or `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime\nID\u003e/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`. Note:\nRuntimes are under construction and will be available soon. 
If \u003cRuntime ID\u003e\nis not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+/entityTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Returns the list of all session entity types in the specified session.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes", + "httpMethod": "GET", + "id": "dialogflow.projects.agent.sessions.entityTypes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. The next_page_token value returned from a previous list request.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "Required. 
The session to list all session entity types from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/entityTypes", + "response": { + "$ref": "GoogleCloudDialogflowV2ListSessionEntityTypesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the specified session entity type.", + "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes/{entityTypesId}", + "httpMethod": "PATCH", + "id": "dialogflow.projects.agent.sessions.entityTypes.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required. The unique identifier of this session entity type. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003esessions/\u003cSession\nID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "location": "path", + "pattern": "^projects/[^/]+/agent/sessions/[^/]+/entityTypes/[^/]+$", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "response": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + } + }, + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "flatPath": "v2/projects/{projectsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "dialogflow.projects.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^projects/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + }, + "revision": "20180317", + "rootUrl": "https://dialogflow.googleapis.com/", + "schemas": { + "GoogleCloudDialogflowV2Agent": { + "description": "Represents a conversational agent.", + "id": "GoogleCloudDialogflowV2Agent", + "properties": { + "avatarUri": { + "description": "Optional. The URI of the agent's avatar.\nAvatars are used throughout API.AI console and in the self-hosted\n[Web Demo](https://dialogflow.com/docs/integrations/web-demo) integration.", + "type": "string" + }, + "classificationThreshold": { + "description": "Optional. To filter out false positive results and still get variety in\nmatched natural language inputs for your agent, you can tune the machine\nlearning classification threshold. 
If the returned score value is less than\nthe threshold value, then a fallback intent is be triggered or, if there\nare no fallback intents defined, no intent will be triggered. The score\nvalues range from 0.0 (completely uncertain) to 1.0 (completely certain).\nIf set to 0.0, the default of 0.3 is used.", + "format": "float", + "type": "number" + }, + "defaultLanguageCode": { + "description": "Required. The default language of the agent as a language tag. See\n[Language Support](https://dialogflow.com/docs/reference/language) for a\nlist of the currently supported language codes.\nThis field cannot be set by the `Update` method.", + "type": "string" + }, + "description": { + "description": "Optional. The description of this agent.\nThe maximum length is 500 characters. If exceeded, the request is rejected.", + "type": "string" + }, + "displayName": { + "description": "Required. The name of this agent.", + "type": "string" + }, + "enableLogging": { + "description": "Optional. Determines whether this agent should log conversation queries.", + "type": "boolean" + }, + "matchMode": { + "description": "Optional. Determines how intents are detected from user queries.", + "enum": [ + "MATCH_MODE_UNSPECIFIED", + "MATCH_MODE_HYBRID", + "MATCH_MODE_ML_ONLY" + ], + "enumDescriptions": [ + "Not specified.", + "Best for agents with a small number of examples in intents and/or wide\nuse of templates syntax and composite entities.", + "Can be used for agents with a large number of examples in intents,\nespecially the ones using @sys.any or very large developer entities." + ], + "type": "string" + }, + "parent": { + "description": "Required. The project of this agent.\nFormat: `projects/\u003cProject ID\u003e`.", + "type": "string" + }, + "supportedLanguageCodes": { + "description": "Optional. 
The list of all languages supported by this agent (except for the\n`default_language_code`).", + "items": { + "type": "string" + }, + "type": "array" + }, + "timeZone": { + "description": "Required. The time zone of this agent from the\n[time zone database](https://www.iana.org/time-zones), e.g.,\nAmerica/New_York, Europe/Paris.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchCreateEntitiesRequest": { + "description": "The request message for EntityTypes.BatchCreateEntities.", + "id": "GoogleCloudDialogflowV2BatchCreateEntitiesRequest", + "properties": { + "entities": { + "description": "Required. The collection of entities to create.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityTypeEntity" + }, + "type": "array" + }, + "languageCode": { + "description": "Optional. The language of entity synonyms defined in `entities`. If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchDeleteEntitiesRequest": { + "description": "The request message for EntityTypes.BatchDeleteEntities.", + "id": "GoogleCloudDialogflowV2BatchDeleteEntitiesRequest", + "properties": { + "entityValues": { + "description": "Required. The canonical `values` of the entities to delete. Note that\nthese are not fully-qualified names, i.e. they don't start with\n`projects/\u003cProject ID\u003e`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "languageCode": { + "description": "Optional. The language of entity synonyms defined in `entities`. 
If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest": { + "description": "The request message for EntityTypes.BatchDeleteEntityTypes.", + "id": "GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest", + "properties": { + "entityTypeNames": { + "description": "Required. The names entity types to delete. All names must point to the\nsame agent as `parent`.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchDeleteIntentsRequest": { + "description": "The request message for Intents.BatchDeleteIntents.", + "id": "GoogleCloudDialogflowV2BatchDeleteIntentsRequest", + "properties": { + "intents": { + "description": "Required. The collection of intents to delete. Only intent `name` must be\nfilled in.", + "items": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchUpdateEntitiesRequest": { + "description": "The response message for EntityTypes.BatchCreateEntities.", + "id": "GoogleCloudDialogflowV2BatchUpdateEntitiesRequest", + "properties": { + "entities": { + "description": "Required. The collection of new entities to replace the existing entities.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityTypeEntity" + }, + "type": "array" + }, + "languageCode": { + "description": "Optional. The language of entity synonyms defined in `entities`. If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest": { + "description": "The request message for EntityTypes.BatchUpdateEntityTypes.", + "id": "GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest", + "properties": { + "entityTypeBatchInline": { + "$ref": "GoogleCloudDialogflowV2EntityTypeBatch", + "description": "The collection of entity type to update or create." + }, + "entityTypeBatchUri": { + "description": "The URI to a Google Cloud Storage file containing entity types to update\nor create. The file format can either be a serialized proto (of\nEntityBatch type) or a JSON object. Note: The URI must start with\n\"gs://\".", + "type": "string" + }, + "languageCode": { + "description": "Optional. The language of entity synonyms defined in `entity_types`. If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchUpdateEntityTypesResponse": { + "description": "The response message for EntityTypes.BatchUpdateEntityTypes.", + "id": "GoogleCloudDialogflowV2BatchUpdateEntityTypesResponse", + "properties": { + "entityTypes": { + "description": "The collection of updated or created entity types.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchUpdateIntentsRequest": { + "description": "The request message for Intents.BatchUpdateIntents.", + "id": "GoogleCloudDialogflowV2BatchUpdateIntentsRequest", + "properties": { + "intentBatchInline": { + "$ref": "GoogleCloudDialogflowV2IntentBatch", + "description": "The collection of intents to update or create." + }, + "intentBatchUri": { + "description": "The URI to a Google Cloud Storage file containing intents to update or\ncreate. The file format can either be a serialized proto (of IntentBatch\ntype) or JSON object. Note: The URI must start with \"gs://\".", + "type": "string" + }, + "intentView": { + "description": "Optional. The resource view to apply to the returned intent.", + "enum": [ + "INTENT_VIEW_UNSPECIFIED", + "INTENT_VIEW_FULL" + ], + "enumDescriptions": [ + "Training phrases field is not populated in the response.", + "All fields are populated." + ], + "type": "string" + }, + "languageCode": { + "description": "Optional. The language of training phrases, parameters and rich messages\ndefined in `intents`. If not specified, the agent's default language is\nused. [More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + "type": "string" + }, + "updateMask": { + "description": "Optional. 
The mask to control which fields get updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2BatchUpdateIntentsResponse": { + "description": "The response message for Intents.BatchUpdateIntents.", + "id": "GoogleCloudDialogflowV2BatchUpdateIntentsResponse", + "properties": { + "intents": { + "description": "The collection of updated or created intents.", + "items": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2Context": { + "description": "Represents a context.", + "id": "GoogleCloudDialogflowV2Context", + "properties": { + "lifespanCount": { + "description": "Optional. The number of conversational query requests after which the\ncontext expires. If set to `0` (the default) the context expires\nimmediately. Contexts expire automatically after 10 minutes even if there\nare no matching queries.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "Required. The unique identifier of the context. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`,\nor\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nThe Context ID is always converted to lowercase.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "type": "string" + }, + "parameters": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. 
The collection of parameters associated with this context.\nRefer to [this doc](https://dialogflow.com/docs/actions-and-parameters) for\nsyntax.", + "type": "object" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2DetectIntentRequest": { + "description": "The request to detect user's intent.", + "id": "GoogleCloudDialogflowV2DetectIntentRequest", + "properties": { + "inputAudio": { + "description": "Optional. The natural language speech audio to be processed. This field\nshould be populated iff `query_input` is set to an input audio config.\nA single request can contain up to 1 minute of speech audio data.", + "format": "byte", + "type": "string" + }, + "queryInput": { + "$ref": "GoogleCloudDialogflowV2QueryInput", + "description": "Required. The input specification. It can be set to:\n\n1. an audio config\n which instructs the speech recognizer how to process the speech audio,\n\n2. a conversational query in the form of text, or\n\n3. an event that specifies which intent to trigger." + }, + "queryParams": { + "$ref": "GoogleCloudDialogflowV2QueryParameters", + "description": "Optional. The parameters of this query." + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2DetectIntentResponse": { + "description": "The message returned from the DetectIntent method.", + "id": "GoogleCloudDialogflowV2DetectIntentResponse", + "properties": { + "queryResult": { + "$ref": "GoogleCloudDialogflowV2QueryResult", + "description": "The results of the conversational query or event processing." + }, + "responseId": { + "description": "The unique identifier of the response. It can be used to\nlocate a response in the training example set or for reporting issues.", + "type": "string" + }, + "webhookStatus": { + "$ref": "GoogleRpcStatus", + "description": "Specifies the status of the webhook request. `webhook_status`\nis never populated in webhook requests." 
+ } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2EntityType": { + "description": "Represents an entity type.\nEntity types serve as a tool for extracting parameter values from natural\nlanguage queries.", + "id": "GoogleCloudDialogflowV2EntityType", + "properties": { + "autoExpansionMode": { + "description": "Optional. Indicates whether the entity type can be automatically\nexpanded.", + "enum": [ + "AUTO_EXPANSION_MODE_UNSPECIFIED", + "AUTO_EXPANSION_MODE_DEFAULT" + ], + "enumDescriptions": [ + "Auto expansion disabled for the entity.", + "Allows an agent to recognize values that have not been explicitly\nlisted in the entity." + ], + "type": "string" + }, + "displayName": { + "description": "Required. The name of the entity.", + "type": "string" + }, + "entities": { + "description": "Optional. The collection of entities associated with the entity type.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityTypeEntity" + }, + "type": "array" + }, + "kind": { + "description": "Required. Indicates the kind of entity type.", + "enum": [ + "KIND_UNSPECIFIED", + "KIND_MAP", + "KIND_LIST" + ], + "enumDescriptions": [ + "Not specified. This value should be never used.", + "Map entity types allow mapping of a group of synonyms to a canonical\nvalue.", + "List entity types contain a set of entries that do not map to canonical\nvalues. However, list entity types can contain references to other entity\ntypes (with or without aliases)." + ], + "type": "string" + }, + "name": { + "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of the entity type. 
Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2EntityTypeBatch": { + "description": "This message is a wrapper around a collection of entity types.", + "id": "GoogleCloudDialogflowV2EntityTypeBatch", + "properties": { + "entityTypes": { + "description": "A collection of entity types.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2EntityTypeEntity": { + "description": "Optional. Represents an entity.", + "id": "GoogleCloudDialogflowV2EntityTypeEntity", + "properties": { + "synonyms": { + "description": "Required. A collection of synonyms. For `KIND_LIST` entity types this\nmust contain exactly one synonym equal to `value`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "value": { + "description": "Required.\nFor `KIND_MAP` entity types:\n A canonical name to be used in place of synonyms.\nFor `KIND_LIST` entity types:\n A string that can contain references to other entity types (with or\n without aliases).", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2EventInput": { + "description": "Events allow for matching intents by event name instead of the natural\nlanguage input. For instance, input `\u003cevent: { name: “welcome_event”,\nparameters: { name: “Sam” } }\u003e` can trigger a personalized welcome response.\nThe parameter `name` may be used by the agent in the response:\n`“Hello #welcome_event.name! What can I do for you today?”`.", + "id": "GoogleCloudDialogflowV2EventInput", + "properties": { + "languageCode": { + "description": "Required. The language of this query. See [Language\nSupport](https://dialogflow.com/docs/languages) for a list of the\ncurrently supported language codes. 
Note that queries in the same session\ndo not necessarily need to specify the same language.", + "type": "string" + }, + "name": { + "description": "Required. The unique identifier of the event.", + "type": "string" + }, + "parameters": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. The collection of parameters associated with the event.", + "type": "object" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ExportAgentRequest": { + "description": "The request message for Agents.ExportAgent.", + "id": "GoogleCloudDialogflowV2ExportAgentRequest", + "properties": { + "agentUri": { + "description": "Optional. The Google Cloud Storage URI to export the agent to.\nNote: The URI must start with\n\"gs://\". If left unspecified, the serialized agent is returned inline.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ExportAgentResponse": { + "description": "The response message for Agents.ExportAgent.", + "id": "GoogleCloudDialogflowV2ExportAgentResponse", + "properties": { + "agentContent": { + "description": "The exported agent.\n\nExample for how to export an agent to a zip file via a command line:\n\ncurl \\\n 'https://dialogflow.googleapis.com/v2/projects/\u003cproject_name\u003e/agent:export'\\\n -X POST \\\n -H 'Authorization: Bearer '$(gcloud auth print-access-token) \\\n -H 'Accept: application/json' \\\n -H 'Content-Type: application/json' \\\n --compressed \\\n --data-binary '{}' \\\n| grep agentContent | sed -e 's/.*\"agentContent\": \"\\([^\"]*\\)\".*/\\1/' \\\n| base64 --decode \u003e \u003cagent zip file\u003e", + "format": "byte", + "type": "string" + }, + "agentUri": { + "description": "The URI to a file containing the exported agent. 
This field is populated\nonly if `agent_uri` is specified in `ExportAgentRequest`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ImportAgentRequest": { + "description": "The request message for Agents.ImportAgent.", + "id": "GoogleCloudDialogflowV2ImportAgentRequest", + "properties": { + "agentContent": { + "description": "The agent to import.\n\nExample for how to import an agent via the command line:\n\ncurl \\\n 'https://dialogflow.googleapis.com/v2/projects/\u003cproject_name\u003e/agent:import\\\n -X POST \\\n -H 'Authorization: Bearer '$(gcloud auth print-access-token) \\\n -H 'Accept: application/json' \\\n -H 'Content-Type: application/json' \\\n --compressed \\\n --data-binary \"{\n 'agentContent': '$(cat \u003cagent zip file\u003e | base64 -w 0)'\n }\"", + "format": "byte", + "type": "string" + }, + "agentUri": { + "description": "The URI to a Google Cloud Storage file containing the agent to import.\nNote: The URI must start with \"gs://\".", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2InputAudioConfig": { + "description": "Instructs the speech recognizer how to process the audio content.", + "id": "GoogleCloudDialogflowV2InputAudioConfig", + "properties": { + "audioEncoding": { + "description": "Required. Audio encoding of the audio content to process.", + "enum": [ + "AUDIO_ENCODING_UNSPECIFIED", + "AUDIO_ENCODING_LINEAR_16", + "AUDIO_ENCODING_FLAC", + "AUDIO_ENCODING_MULAW", + "AUDIO_ENCODING_AMR", + "AUDIO_ENCODING_AMR_WB", + "AUDIO_ENCODING_OGG_OPUS", + "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE" + ], + "enumDescriptions": [ + "Not specified.", + "Uncompressed 16-bit signed little-endian samples (Linear PCM).", + "[`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio\nCodec) is the recommended encoding because it is lossless (therefore\nrecognition is not compromised) and requires only about half the\nbandwidth of `LINEAR16`. 
`FLAC` stream encoding supports 16-bit and\n24-bit samples, however, not all fields in `STREAMINFO` are supported.", + "8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.", + "Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.", + "Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.", + "Opus encoded audio frames in Ogg container\n([OggOpus](https://wiki.xiph.org/OggOpus)).\n`sample_rate_hertz` must be 16000.", + "Although the use of lossy encodings is not recommended, if a very low\nbitrate encoding is required, `OGG_OPUS` is highly preferred over\nSpeex encoding. The [Speex](https://speex.org/) encoding supported by\nDialogflow API has a header byte in each block, as in MIME type\n`audio/x-speex-with-header-byte`.\nIt is a variant of the RTP Speex encoding defined in\n[RFC 5574](https://tools.ietf.org/html/rfc5574).\nThe stream is a sequence of blocks, one block per RTP packet. Each block\nstarts with a byte containing the length of the block, in bytes, followed\nby one or more frames of Speex data, padded to an integral number of\nbytes (octets) as specified in RFC 5574. In other words, each RTP header\nis replaced with a single byte containing the block length. Only Speex\nwideband is supported. `sample_rate_hertz` must be 16000." + ], + "type": "string" + }, + "languageCode": { + "description": "Required. The language of the supplied audio. Dialogflow does not do\ntranslations. See [Language\nSupport](https://dialogflow.com/docs/languages) for a list of the\ncurrently supported language codes. Note that queries in the same session\ndo not necessarily need to specify the same language.", + "type": "string" + }, + "phraseHints": { + "description": "Optional. 
The collection of phrase hints which are used to boost accuracy\nof speech recognition.\nRefer to [Cloud Speech API documentation](/speech/docs/basics#phrase-hints)\nfor more details.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sampleRateHertz": { + "description": "Required. Sample rate (in Hertz) of the audio content sent in the query.\nRefer to [Cloud Speech API documentation](/speech/docs/basics) for more\ndetails.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2Intent": { + "description": "Represents an intent.\nIntents convert a number of user expressions or patterns into an action. An\naction is an extraction of a user command or sentence semantics.", + "id": "GoogleCloudDialogflowV2Intent", + "properties": { + "action": { + "description": "Optional. The name of the action associated with the intent.", + "type": "string" + }, + "defaultResponsePlatforms": { + "description": "Optional. The list of platforms for which the first response will be\ntaken from among the messages assigned to the DEFAULT_PLATFORM.", + "enumDescriptions": [ + "Not specified.", + "Facebook.", + "Slack.", + "Telegram.", + "Kik.", + "Skype.", + "Line.", + "Viber.", + "Actions on Google." + ], + "items": { + "enum": [ + "PLATFORM_UNSPECIFIED", + "FACEBOOK", + "SLACK", + "TELEGRAM", + "KIK", + "SKYPE", + "LINE", + "VIBER", + "ACTIONS_ON_GOOGLE" + ], + "type": "string" + }, + "type": "array" + }, + "displayName": { + "description": "Required. The name of this intent.", + "type": "string" + }, + "events": { + "description": "Optional. The collection of event names that trigger the intent.\nIf the collection of input contexts is not empty, all of the contexts must\nbe present in the active user session for an event to trigger this intent.", + "items": { + "type": "string" + }, + "type": "array" + }, + "followupIntentInfo": { + "description": "Optional. 
Collection of information about all followup intents that have\nname of this intent as a root_name.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentFollowupIntentInfo" + }, + "type": "array" + }, + "inputContextNames": { + "description": "Optional. The list of context names required for this intent to be\ntriggered.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/-/contexts/\u003cContext ID\u003e`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "isFallback": { + "description": "Optional. Indicates whether this is a fallback intent.", + "type": "boolean" + }, + "messages": { + "description": "Optional. The collection of rich messages corresponding to the\n`Response` field in API.AI console.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessage" + }, + "type": "array" + }, + "mlDisabled": { + "description": "Optional. Indicates whether Machine Learning is disabled for the intent.\nNote: If `ml_diabled` setting is set to true, then this intent is not\ntaken into account during inference in `ML ONLY` match mode. Also,\nauto-markup in the UI is turned off.", + "type": "boolean" + }, + "name": { + "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of this intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "outputContexts": { + "description": "Optional. The collection of contexts that are activated when the intent\nis matched. Context messages in this collection should not set the\nparameters field. Setting the `lifespan_count` to 0 will reset the context\nwhen the intent is matched.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/-/contexts/\u003cContext ID\u003e`.", + "items": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "type": "array" + }, + "parameters": { + "description": "Optional. 
The collection of parameters associated with the intent.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentParameter" + }, + "type": "array" + }, + "parentFollowupIntentName": { + "description": "The unique identifier of the parent intent in the chain of followup\nintents.\nIt identifies the parent followup intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "priority": { + "description": "Optional. The priority of this intent. Higher numbers represent higher\npriorities. Zero or negative numbers mean that the intent is disabled.", + "format": "int32", + "type": "integer" + }, + "resetContexts": { + "description": "Optional. Indicates whether to delete all contexts in the current\nsession when this intent is matched.", + "type": "boolean" + }, + "rootFollowupIntentName": { + "description": "The unique identifier of the root intent in the chain of followup intents.\nIt identifies the correct followup intents chain for this intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "trainingPhrases": { + "description": "Optional. The collection of examples/templates that the agent is\ntrained on.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentTrainingPhrase" + }, + "type": "array" + }, + "webhookState": { + "description": "Required. Indicates whether webhooks are enabled for the intent.", + "enum": [ + "WEBHOOK_STATE_UNSPECIFIED", + "WEBHOOK_STATE_ENABLED", + "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING" + ], + "enumDescriptions": [ + "Webhook is disabled in the agent and in the intent.", + "Webhook is enabled in the agent and in the intent.", + "Webhook is enabled in the agent and in the intent. Also, each slot\nfilling prompt is forwarded to the webhook." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentBatch": { + "description": "This message is a wrapper around a collection of intents.", + "id": "GoogleCloudDialogflowV2IntentBatch", + "properties": { + "intents": { + "description": "A collection of intents.", + "items": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentFollowupIntentInfo": { + "description": "Represents a single followup intent in the chain.", + "id": "GoogleCloudDialogflowV2IntentFollowupIntentInfo", + "properties": { + "followupIntentName": { + "description": "The unique identifier of the followup intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "parentFollowupIntentName": { + "description": "The unique identifier of the followup intent parent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessage": { + "description": "Corresponds to the `Response` field in API.AI console.", + "id": "GoogleCloudDialogflowV2IntentMessage", + "properties": { + "basicCard": { + "$ref": "GoogleCloudDialogflowV2IntentMessageBasicCard", + "description": "The basic card response for Actions on Google." + }, + "card": { + "$ref": "GoogleCloudDialogflowV2IntentMessageCard", + "description": "The card response." + }, + "carouselSelect": { + "$ref": "GoogleCloudDialogflowV2IntentMessageCarouselSelect", + "description": "The carousel card response for Actions on Google." + }, + "image": { + "$ref": "GoogleCloudDialogflowV2IntentMessageImage", + "description": "The image response." + }, + "linkOutSuggestion": { + "$ref": "GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion", + "description": "The link out suggestion chip for Actions on Google." 
+ }, + "listSelect": { + "$ref": "GoogleCloudDialogflowV2IntentMessageListSelect", + "description": "The list card response for Actions on Google." + }, + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The response containing a custom payload.", + "type": "object" + }, + "platform": { + "description": "Optional. The platform that this message is intended for.", + "enum": [ + "PLATFORM_UNSPECIFIED", + "FACEBOOK", + "SLACK", + "TELEGRAM", + "KIK", + "SKYPE", + "LINE", + "VIBER", + "ACTIONS_ON_GOOGLE" + ], + "enumDescriptions": [ + "Not specified.", + "Facebook.", + "Slack.", + "Telegram.", + "Kik.", + "Skype.", + "Line.", + "Viber.", + "Actions on Google." + ], + "type": "string" + }, + "quickReplies": { + "$ref": "GoogleCloudDialogflowV2IntentMessageQuickReplies", + "description": "The quick replies response." + }, + "simpleResponses": { + "$ref": "GoogleCloudDialogflowV2IntentMessageSimpleResponses", + "description": "The voice and text-only responses for Actions on Google." + }, + "suggestions": { + "$ref": "GoogleCloudDialogflowV2IntentMessageSuggestions", + "description": "The suggestion chips for Actions on Google." + }, + "text": { + "$ref": "GoogleCloudDialogflowV2IntentMessageText", + "description": "The text response." + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageBasicCard": { + "description": "The basic card message. Useful for displaying information.", + "id": "GoogleCloudDialogflowV2IntentMessageBasicCard", + "properties": { + "buttons": { + "description": "Optional. The collection of card buttons.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessageBasicCardButton" + }, + "type": "array" + }, + "formattedText": { + "description": "Required, unless image is present. The body text of the card.", + "type": "string" + }, + "image": { + "$ref": "GoogleCloudDialogflowV2IntentMessageImage", + "description": "Optional. The image for the card." 
+ }, + "subtitle": { + "description": "Optional. The subtitle of the card.", + "type": "string" + }, + "title": { + "description": "Optional. The title of the card.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageBasicCardButton": { + "description": "The button object that appears at the bottom of a card.", + "id": "GoogleCloudDialogflowV2IntentMessageBasicCardButton", + "properties": { + "openUriAction": { + "$ref": "GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction", + "description": "Required. Action to take when a user taps on the button." + }, + "title": { + "description": "Required. The title of the button.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction": { + "description": "Opens the given URI.", + "id": "GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction", + "properties": { + "uri": { + "description": "Required. The HTTP or HTTPS scheme URI.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageCard": { + "description": "The card response message.", + "id": "GoogleCloudDialogflowV2IntentMessageCard", + "properties": { + "buttons": { + "description": "Optional. The collection of card buttons.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessageCardButton" + }, + "type": "array" + }, + "imageUri": { + "description": "Optional. The public URI to an image file for the card.", + "type": "string" + }, + "subtitle": { + "description": "Optional. The subtitle of the card.", + "type": "string" + }, + "title": { + "description": "Optional. The title of the card.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageCardButton": { + "description": "Optional. Contains information about a button.", + "id": "GoogleCloudDialogflowV2IntentMessageCardButton", + "properties": { + "postback": { + "description": "Optional. 
The text to send back to the Dialogflow API or a URI to\nopen.", + "type": "string" + }, + "text": { + "description": "Optional. The text to show on the button.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageCarouselSelect": { + "description": "The card for presenting a carousel of options to select from.", + "id": "GoogleCloudDialogflowV2IntentMessageCarouselSelect", + "properties": { + "items": { + "description": "Required. Carousel items.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessageCarouselSelectItem" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageCarouselSelectItem": { + "description": "An item in the carousel.", + "id": "GoogleCloudDialogflowV2IntentMessageCarouselSelectItem", + "properties": { + "description": { + "description": "Optional. The body text of the card.", + "type": "string" + }, + "image": { + "$ref": "GoogleCloudDialogflowV2IntentMessageImage", + "description": "Optional. The image to display." + }, + "info": { + "$ref": "GoogleCloudDialogflowV2IntentMessageSelectItemInfo", + "description": "Required. Additional info about the option item." + }, + "title": { + "description": "Required. Title of the carousel item.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageImage": { + "description": "The image response message.", + "id": "GoogleCloudDialogflowV2IntentMessageImage", + "properties": { + "accessibilityText": { + "description": "Optional. A text description of the image to be used for accessibility,\ne.g., screen readers.", + "type": "string" + }, + "imageUri": { + "description": "Optional. 
The public URI to an image file.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion": { + "description": "The suggestion chip message that allows the user to jump out to the app\nor website associated with this agent.", + "id": "GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion", + "properties": { + "destinationName": { + "description": "Required. The name of the app or site this chip is linking to.", + "type": "string" + }, + "uri": { + "description": "Required. The URI of the app or site to open when the user taps the\nsuggestion chip.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageListSelect": { + "description": "The card for presenting a list of options to select from.", + "id": "GoogleCloudDialogflowV2IntentMessageListSelect", + "properties": { + "items": { + "description": "Required. List items.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessageListSelectItem" + }, + "type": "array" + }, + "title": { + "description": "Optional. The overall title of the list.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageListSelectItem": { + "description": "An item in the list.", + "id": "GoogleCloudDialogflowV2IntentMessageListSelectItem", + "properties": { + "description": { + "description": "Optional. The main text describing the item.", + "type": "string" + }, + "image": { + "$ref": "GoogleCloudDialogflowV2IntentMessageImage", + "description": "Optional. The image to display." + }, + "info": { + "$ref": "GoogleCloudDialogflowV2IntentMessageSelectItemInfo", + "description": "Required. Additional information about this option." + }, + "title": { + "description": "Required. 
The title of the list item.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageQuickReplies": { + "description": "The quick replies response message.", + "id": "GoogleCloudDialogflowV2IntentMessageQuickReplies", + "properties": { + "quickReplies": { + "description": "Optional. The collection of quick replies.", + "items": { + "type": "string" + }, + "type": "array" + }, + "title": { + "description": "Optional. The title of the collection of quick replies.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageSelectItemInfo": { + "description": "Additional info about the select item for when it is triggered in a\ndialog.", + "id": "GoogleCloudDialogflowV2IntentMessageSelectItemInfo", + "properties": { + "key": { + "description": "Required. A unique key that will be sent back to the agent if this\nresponse is given.", + "type": "string" + }, + "synonyms": { + "description": "Optional. A list of synonyms that can also be used to trigger this\nitem in dialog.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageSimpleResponse": { + "description": "The simple response message containing speech or text.", + "id": "GoogleCloudDialogflowV2IntentMessageSimpleResponse", + "properties": { + "displayText": { + "description": "Optional. The text to display.", + "type": "string" + }, + "ssml": { + "description": "One of text_to_speech or ssml must be provided. Structured spoken\nresponse to the user in the SSML format. Mutually exclusive with\ntext_to_speech.", + "type": "string" + }, + "textToSpeech": { + "description": "One of text_to_speech or ssml must be provided. The plain text of the\nspeech output. 
Mutually exclusive with ssml.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageSimpleResponses": { + "description": "The collection of simple response candidates.\nThis message in `QueryResult.fulfillment_messages` and\n`WebhookResponse.fulfillment_messages` should contain only one\n`SimpleResponse`.", + "id": "GoogleCloudDialogflowV2IntentMessageSimpleResponses", + "properties": { + "simpleResponses": { + "description": "Required. The list of simple responses.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessageSimpleResponse" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageSuggestion": { + "description": "The suggestion chip message that the user can tap to quickly post a reply\nto the conversation.", + "id": "GoogleCloudDialogflowV2IntentMessageSuggestion", + "properties": { + "title": { + "description": "Required. The text shown the in the suggestion chip.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageSuggestions": { + "description": "The collection of suggestions.", + "id": "GoogleCloudDialogflowV2IntentMessageSuggestions", + "properties": { + "suggestions": { + "description": "Required. The list of suggested replies.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessageSuggestion" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentMessageText": { + "description": "The text response message.", + "id": "GoogleCloudDialogflowV2IntentMessageText", + "properties": { + "text": { + "description": "Optional. The collection of the agent's responses.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentParameter": { + "description": "Represents intent parameters.", + "id": "GoogleCloudDialogflowV2IntentParameter", + "properties": { + "defaultValue": { + "description": "Optional. 
The default value to use when the `value` yields an empty\nresult.\nDefault values can be extracted from contexts by using the following\nsyntax: `#context_name.parameter_name`.", + "type": "string" + }, + "displayName": { + "description": "Required. The name of the parameter.", + "type": "string" + }, + "entityTypeDisplayName": { + "description": "Optional. The name of the entity type, prefixed with `@`, that\ndescribes values of the parameter. If the parameter is\nrequired, this must be provided.", + "type": "string" + }, + "isList": { + "description": "Optional. Indicates whether the parameter represents a list of values.", + "type": "boolean" + }, + "mandatory": { + "description": "Optional. Indicates whether the parameter is required. That is,\nwhether the intent cannot be completed without collecting the parameter\nvalue.", + "type": "boolean" + }, + "name": { + "description": "The unique identifier of this parameter.", + "type": "string" + }, + "prompts": { + "description": "Optional. The collection of prompts that the agent can present to the\nuser in order to collect value for the parameter.", + "items": { + "type": "string" + }, + "type": "array" + }, + "value": { + "description": "Optional. The definition of the parameter value. It can be:\n- a constant string,\n- a parameter value defined as `$parameter_name`,\n- an original parameter value defined as `$parameter_name.original`,\n- a parameter value from some context defined as\n `#context_name.parameter_name`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentTrainingPhrase": { + "description": "Represents an example or template that the agent is trained on.", + "id": "GoogleCloudDialogflowV2IntentTrainingPhrase", + "properties": { + "name": { + "description": "Required. The unique identifier of this training phrase.", + "type": "string" + }, + "parts": { + "description": "Required. 
The collection of training phrase parts (can be annotated).\nFields: `entity_type`, `alias` and `user_defined` should be populated\nonly for the annotated parts of the training phrase.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentTrainingPhrasePart" + }, + "type": "array" + }, + "timesAddedCount": { + "description": "Optional. Indicates how many times this example or template was added to\nthe intent. Each time a developer adds an existing sample by editing an\nintent or training, this counter is increased.", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "Required. The type of the training phrase.", + "enum": [ + "TYPE_UNSPECIFIED", + "EXAMPLE", + "TEMPLATE" + ], + "enumDescriptions": [ + "Not specified. This value should never be used.", + "Examples do not contain @-prefixed entity type names, but example parts\ncan be annotated with entity types.", + "Templates are not annotated with entity types, but they can contain\n@-prefixed entity type names as substrings." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2IntentTrainingPhrasePart": { + "description": "Represents a part of a training phrase.", + "id": "GoogleCloudDialogflowV2IntentTrainingPhrasePart", + "properties": { + "alias": { + "description": "Optional. The parameter name for the value extracted from the\nannotated part of the example.", + "type": "string" + }, + "entityType": { + "description": "Optional. The entity type name prefixed with `@`. This field is\nrequired for the annotated part of the text and applies only to\nexamples.", + "type": "string" + }, + "text": { + "description": "Required. The text corresponding to the example or template,\nif there are no annotations. For\nannotated examples, it is the text for one of the example's parts.", + "type": "string" + }, + "userDefined": { + "description": "Optional. 
Indicates whether the text was manually annotated by the\ndeveloper.", + "type": "boolean" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ListContextsResponse": { + "description": "The response message for Contexts.ListContexts.", + "id": "GoogleCloudDialogflowV2ListContextsResponse", + "properties": { + "contexts": { + "description": "The list of contexts. There will be a maximum number of items\nreturned based on the page_size field in the request.", + "items": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no\nmore results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ListEntityTypesResponse": { + "description": "The response message for EntityTypes.ListEntityTypes.", + "id": "GoogleCloudDialogflowV2ListEntityTypesResponse", + "properties": { + "entityTypes": { + "description": "The list of agent entity types. There will be a maximum number of items\nreturned based on the page_size field in the request.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityType" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no\nmore results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ListIntentsResponse": { + "description": "The response message for Intents.ListIntents.", + "id": "GoogleCloudDialogflowV2ListIntentsResponse", + "properties": { + "intents": { + "description": "The list of agent intents. 
There will be a maximum number of items\nreturned based on the page_size field in the request.", + "items": { + "$ref": "GoogleCloudDialogflowV2Intent" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no\nmore results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2ListSessionEntityTypesResponse": { + "description": "The response message for SessionEntityTypes.ListSessionEntityTypes.", + "id": "GoogleCloudDialogflowV2ListSessionEntityTypesResponse", + "properties": { + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no\nmore results in the list.", + "type": "string" + }, + "sessionEntityTypes": { + "description": "The list of session entity types. There will be a maximum number of items\nreturned based on the page_size field in the request.", + "items": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2OriginalDetectIntentRequest": { + "description": "Represents the contents of the original request that was passed to\nthe `[Streaming]DetectIntent` call.", + "id": "GoogleCloudDialogflowV2OriginalDetectIntentRequest", + "properties": { + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. This field is set to the value of `QueryParameters.payload` field\npassed in the request.", + "type": "object" + }, + "source": { + "description": "The source of this request, e.g., `google`, `facebook`, `slack`. It is set\nby Dialogflow-owned servers. Possible values of this field correspond to\nIntent.Message.Platform.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2QueryInput": { + "description": "Represents the query input. It can contain either:\n\n1. 
An audio config which\n instructs the speech recognizer how to process the speech audio.\n\n2. A conversational query in the form of text,.\n\n3. An event that specifies which intent to trigger.", + "id": "GoogleCloudDialogflowV2QueryInput", + "properties": { + "audioConfig": { + "$ref": "GoogleCloudDialogflowV2InputAudioConfig", + "description": "Instructs the speech recognizer how to process the speech audio." + }, + "event": { + "$ref": "GoogleCloudDialogflowV2EventInput", + "description": "The event to be processed." + }, + "text": { + "$ref": "GoogleCloudDialogflowV2TextInput", + "description": "The natural language text to be processed." + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2QueryParameters": { + "description": "Represents the parameters of the conversational query.", + "id": "GoogleCloudDialogflowV2QueryParameters", + "properties": { + "contexts": { + "description": "Optional. The collection of contexts to be activated before this query is\nexecuted.", + "items": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "type": "array" + }, + "geoLocation": { + "$ref": "GoogleTypeLatLng", + "description": "Optional. The geo location of this conversational query." + }, + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. This field can be used to pass custom data into the webhook\nassociated with the agent. Arbitrary JSON objects are supported.", + "type": "object" + }, + "resetContexts": { + "description": "Optional. Specifies whether to delete all contexts in the current session\nbefore the new ones are activated.", + "type": "boolean" + }, + "sessionEntityTypes": { + "description": "Optional. The collection of session entity types to replace or extend\ndeveloper entities with for this query only. 
The entity synonyms apply\nto all languages.", + "items": { + "$ref": "GoogleCloudDialogflowV2SessionEntityType" + }, + "type": "array" + }, + "timeZone": { + "description": "Optional. The time zone of this conversational query from the\n[time zone database](https://www.iana.org/time-zones), e.g.,\nAmerica/New_York, Europe/Paris. If not provided, the time zone specified in\nagent settings is used.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2QueryResult": { + "description": "Represents the result of conversational query or event processing.", + "id": "GoogleCloudDialogflowV2QueryResult", + "properties": { + "action": { + "description": "The action name from the matched intent.", + "type": "string" + }, + "allRequiredParamsPresent": { + "description": "This field is set to:\n- `false` if the matched intent has required parameters and not all of\n the required parameter values have been collected.\n- `true` if all required parameter values have been collected, or if the\n matched intent doesn't contain any required parameters.", + "type": "boolean" + }, + "diagnosticInfo": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The free-form diagnostic info. For example, this field\ncould contain webhook call latency.", + "type": "object" + }, + "fulfillmentMessages": { + "description": "The collection of rich messages to present to the user.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessage" + }, + "type": "array" + }, + "fulfillmentText": { + "description": "The text to be pronounced to the user or shown on the screen.", + "type": "string" + }, + "intent": { + "$ref": "GoogleCloudDialogflowV2Intent", + "description": "The intent that matched the conversational query. Some, not\nall fields are filled in this message, including but not limited to:\n`name`, `display_name` and `webhook_state`." 
+ }, + "intentDetectionConfidence": { + "description": "The intent detection confidence. Values range from 0.0\n(completely uncertain) to 1.0 (completely certain).", + "format": "float", + "type": "number" + }, + "languageCode": { + "description": "The language that was triggered during intent detection.\nSee [Language Support](https://dialogflow.com/docs/reference/language)\nfor a list of the currently supported language codes.", + "type": "string" + }, + "outputContexts": { + "description": "The collection of output contexts. If applicable,\n`output_contexts.parameters` contains entries with name\n`\u003cparameter name\u003e.original` containing the original parameter values\nbefore the query.", + "items": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "type": "array" + }, + "parameters": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The collection of extracted parameters.", + "type": "object" + }, + "queryText": { + "description": "The original conversational query text:\n- If natural language text was provided as input, `query_text` contains\n a copy of the input.\n- If natural language speech audio was provided as input, `query_text`\n contains the speech recognition result. If speech recognizer produced\n multiple alternatives, a particular one is picked.\n- If an event was provided as input, `query_text` is not set.", + "type": "string" + }, + "speechRecognitionConfidence": { + "description": "The Speech recognition confidence between 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that the recognized words are\ncorrect. The default of 0.0 is a sentinel value indicating that confidence\nwas not set.\n\nYou should not rely on this field as it isn't guaranteed to be accurate, or\neven set. 
In particular this field isn't set in Webhook calls and for\nStreamingDetectIntent since the streaming endpoint has separate confidence\nestimates per portion of the audio in StreamingRecognitionResult.", + "format": "float", + "type": "number" + }, + "webhookPayload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "If the query was fulfilled by a webhook call, this field is set to the\nvalue of the `payload` field returned in the webhook response.", + "type": "object" + }, + "webhookSource": { + "description": "If the query was fulfilled by a webhook call, this field is set to the\nvalue of the `source` field returned in the webhook response.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2RestoreAgentRequest": { + "description": "The request message for Agents.RestoreAgent.", + "id": "GoogleCloudDialogflowV2RestoreAgentRequest", + "properties": { + "agentContent": { + "description": "The agent to restore.\n\nExample for how to restore an agent via the command line:\n\ncurl \\\n 'https://dialogflow.googleapis.com/v2/projects/\u003cproject_name\u003e/agent:restore\\\n -X POST \\\n -H 'Authorization: Bearer '$(gcloud auth print-access-token) \\\n -H 'Accept: application/json' \\\n -H 'Content-Type: application/json' \\\n --compressed \\\n --data-binary \"{\n 'agentContent': '$(cat \u003cagent zip file\u003e | base64 -w 0)'\n }\" \\", + "format": "byte", + "type": "string" + }, + "agentUri": { + "description": "The URI to a Google Cloud Storage file containing the agent to restore.\nNote: The URI must start with \"gs://\".", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2SearchAgentsResponse": { + "description": "The response message for Agents.SearchAgents.", + "id": "GoogleCloudDialogflowV2SearchAgentsResponse", + "properties": { + "agents": { + "description": "The list of agents. 
There will be a maximum number of items returned based\non the page_size field in the request.", + "items": { + "$ref": "GoogleCloudDialogflowV2Agent" + }, + "type": "array" + }, + "nextPageToken": { + "description": "Token to retrieve the next page of results, or empty if there are no\nmore results in the list.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2SessionEntityType": { + "description": "Represents a session entity type.\n\nExtends or replaces a developer entity type at the user session level (we\nrefer to the entity types defined at the agent level as \"developer entity\ntypes\").\n\nNote: session entity types apply to all queries, regardless of the language.", + "id": "GoogleCloudDialogflowV2SessionEntityType", + "properties": { + "entities": { + "description": "Required. The collection of entities associated with this session entity\ntype.", + "items": { + "$ref": "GoogleCloudDialogflowV2EntityTypeEntity" + }, + "type": "array" + }, + "entityOverrideMode": { + "description": "Required. Indicates whether the additional data should override or\nsupplement the developer entity type definition.", + "enum": [ + "ENTITY_OVERRIDE_MODE_UNSPECIFIED", + "ENTITY_OVERRIDE_MODE_OVERRIDE", + "ENTITY_OVERRIDE_MODE_SUPPLEMENT" + ], + "enumDescriptions": [ + "Not specified. This value should be never used.", + "The collection of session entities overrides the collection of entities\nin the corresponding developer entity type.", + "The collection of session entities extends the collection of entities in\nthe corresponding developer entity type.\nCalls to `ListSessionEntityTypes`, `GetSessionEntityType`,\n`CreateSessionEntityType` and `UpdateSessionEntityType` return the full\ncollection of entities from the developer entity type in the agent's\ndefault language and the session entity type." + ], + "type": "string" + }, + "name": { + "description": "Required. The unique identifier of this session entity type. 
Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003esessions/\u003cSession\nID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2TextInput": { + "description": "Represents the natural language text to be processed.", + "id": "GoogleCloudDialogflowV2TextInput", + "properties": { + "languageCode": { + "description": "Required. The language of this conversational query. See [Language\nSupport](https://dialogflow.com/docs/languages) for a list of the\ncurrently supported language codes. Note that queries in the same session\ndo not necessarily need to specify the same language.", + "type": "string" + }, + "text": { + "description": "Required. The UTF-8 encoded natural language text to be processed.\nText length must not exceed 256 bytes.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2TrainAgentRequest": { + "description": "The request message for Agents.TrainAgent.", + "id": "GoogleCloudDialogflowV2TrainAgentRequest", + "properties": {}, + "type": "object" + }, + "GoogleCloudDialogflowV2WebhookRequest": { + "description": "The request message for a webhook call.", + "id": "GoogleCloudDialogflowV2WebhookRequest", + "properties": { + "originalDetectIntentRequest": { + "$ref": "GoogleCloudDialogflowV2OriginalDetectIntentRequest", + "description": "Optional. The contents of the original request that was passed to\n`[Streaming]DetectIntent` call." + }, + "queryResult": { + "$ref": "GoogleCloudDialogflowV2QueryResult", + "description": "The result of the conversational query or event processing. 
Contains the\nsame value as `[Streaming]DetectIntentResponse.query_result`." + }, + "responseId": { + "description": "The unique identifier of the response. Contains the same value as\n`[Streaming]DetectIntentResponse.response_id`.", + "type": "string" + }, + "session": { + "description": "The unique identifier of detectIntent request session.\nCan be used to identify end-user inside webhook implementation.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2WebhookResponse": { + "description": "The response message for a webhook call.", + "id": "GoogleCloudDialogflowV2WebhookResponse", + "properties": { + "followupEventInput": { + "$ref": "GoogleCloudDialogflowV2EventInput", + "description": "Optional. Makes the platform immediately invoke another `DetectIntent` call\ninternally with the specified event as input." + }, + "fulfillmentMessages": { + "description": "Optional. The collection of rich messages to present to the user. This\nvalue is passed directly to `QueryResult.fulfillment_messages`.", + "items": { + "$ref": "GoogleCloudDialogflowV2IntentMessage" + }, + "type": "array" + }, + "fulfillmentText": { + "description": "Optional. The text to be shown on the screen. This value is passed directly\nto `QueryResult.fulfillment_text`.", + "type": "string" + }, + "outputContexts": { + "description": "Optional. The collection of output contexts. This value is passed directly\nto `QueryResult.output_contexts`.", + "items": { + "$ref": "GoogleCloudDialogflowV2Context" + }, + "type": "array" + }, + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. This value is passed directly to `QueryResult.webhook_payload`.", + "type": "object" + }, + "source": { + "description": "Optional. 
This value is passed directly to `QueryResult.webhook_source`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1BatchUpdateEntityTypesResponse": { + "description": "The response message for EntityTypes.BatchUpdateEntityTypes.", + "id": "GoogleCloudDialogflowV2beta1BatchUpdateEntityTypesResponse", + "properties": { + "entityTypes": { + "description": "The collection of updated or created entity types.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1EntityType" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1BatchUpdateIntentsResponse": { + "description": "The response message for Intents.BatchUpdateIntents.", + "id": "GoogleCloudDialogflowV2beta1BatchUpdateIntentsResponse", + "properties": { + "intents": { + "description": "The collection of updated or created intents.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1Intent" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1Context": { + "description": "Represents a context.", + "id": "GoogleCloudDialogflowV2beta1Context", + "properties": { + "lifespanCount": { + "description": "Optional. The number of conversational query requests after which the\ncontext expires. If set to `0` (the default) the context expires\nimmediately. Contexts expire automatically after 10 minutes even if there\nare no matching queries.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "Required. The unique identifier of the context. 
Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`,\nor\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nThe Context ID is always converted to lowercase.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + "type": "string" + }, + "parameters": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. The collection of parameters associated with this context.\nRefer to [this doc](https://dialogflow.com/docs/actions-and-parameters) for\nsyntax.", + "type": "object" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1EntityType": { + "description": "Represents an entity type.\nEntity types serve as a tool for extracting parameter values from natural\nlanguage queries.", + "id": "GoogleCloudDialogflowV2beta1EntityType", + "properties": { + "autoExpansionMode": { + "description": "Optional. Indicates whether the entity type can be automatically\nexpanded.", + "enum": [ + "AUTO_EXPANSION_MODE_UNSPECIFIED", + "AUTO_EXPANSION_MODE_DEFAULT" + ], + "enumDescriptions": [ + "Auto expansion disabled for the entity.", + "Allows an agent to recognize values that have not been explicitly\nlisted in the entity." + ], + "type": "string" + }, + "displayName": { + "description": "Required. The name of the entity.", + "type": "string" + }, + "entities": { + "description": "Optional. The collection of entities associated with the entity type.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1EntityTypeEntity" + }, + "type": "array" + }, + "kind": { + "description": "Required. Indicates the kind of entity type.", + "enum": [ + "KIND_UNSPECIFIED", + "KIND_MAP", + "KIND_LIST" + ], + "enumDescriptions": [ + "Not specified. 
This value should be never used.", + "Map entity types allow mapping of a group of synonyms to a canonical\nvalue.", + "List entity types contain a set of entries that do not map to canonical\nvalues. However, list entity types can contain references to other entity\ntypes (with or without aliases)." + ], + "type": "string" + }, + "name": { + "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of the entity type. Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1EntityTypeEntity": { + "description": "Optional. Represents an entity.", + "id": "GoogleCloudDialogflowV2beta1EntityTypeEntity", + "properties": { + "synonyms": { + "description": "Required. A collection of synonyms. For `KIND_LIST` entity types this\nmust contain exactly one synonym equal to `value`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "value": { + "description": "Required.\nFor `KIND_MAP` entity types:\n A canonical name to be used in place of synonyms.\nFor `KIND_LIST` entity types:\n A string that can contain references to other entity types (with or\n without aliases).", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1EventInput": { + "description": "Events allow for matching intents by event name instead of the natural\nlanguage input. For instance, input `\u003cevent: { name: “welcome_event”,\nparameters: { name: “Sam” } }\u003e` can trigger a personalized welcome response.\nThe parameter `name` may be used by the agent in the response:\n`“Hello #welcome_event.name! What can I do for you today?”`.", + "id": "GoogleCloudDialogflowV2beta1EventInput", + "properties": { + "languageCode": { + "description": "Required. The language of this query. 
See [Language\nSupport](https://dialogflow.com/docs/languages) for a list of the\ncurrently supported language codes. Note that queries in the same session\ndo not necessarily need to specify the same language.", + "type": "string" + }, + "name": { + "description": "Required. The unique identifier of the event.", + "type": "string" + }, + "parameters": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. The collection of parameters associated with the event.", + "type": "object" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1ExportAgentResponse": { + "description": "The response message for Agents.ExportAgent.", + "id": "GoogleCloudDialogflowV2beta1ExportAgentResponse", + "properties": { + "agentContent": { + "description": "The exported agent.\n\nExample for how to export an agent to a zip file via a command line:\n\ncurl \\\n 'https://dialogflow.googleapis.com/v2beta1/projects/\u003cproject_name\u003e/agent:export'\\\n -X POST \\\n -H 'Authorization: Bearer '$(gcloud auth print-access-token) \\\n -H 'Accept: application/json' \\\n -H 'Content-Type: application/json' \\\n --compressed \\\n --data-binary '{}' \\\n| grep agentContent | sed -e 's/.*\"agentContent\": \"\\([^\"]*\\)\".*/\\1/' \\\n| base64 --decode \u003e \u003cagent zip file\u003e", + "format": "byte", + "type": "string" + }, + "agentUri": { + "description": "The URI to a file containing the exported agent. This field is populated\nonly if `agent_uri` is specified in `ExportAgentRequest`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1Intent": { + "description": "Represents an intent.\nIntents convert a number of user expressions or patterns into an action. An\naction is an extraction of a user command or sentence semantics.", + "id": "GoogleCloudDialogflowV2beta1Intent", + "properties": { + "action": { + "description": "Optional. 
The name of the action associated with the intent.", + "type": "string" + }, + "defaultResponsePlatforms": { + "description": "Optional. The list of platforms for which the first response will be\ntaken from among the messages assigned to the DEFAULT_PLATFORM.", + "enumDescriptions": [ + "Not specified.", + "Facebook.", + "Slack.", + "Telegram.", + "Kik.", + "Skype.", + "Line.", + "Viber.", + "Actions on Google." + ], + "items": { + "enum": [ + "PLATFORM_UNSPECIFIED", + "FACEBOOK", + "SLACK", + "TELEGRAM", + "KIK", + "SKYPE", + "LINE", + "VIBER", + "ACTIONS_ON_GOOGLE" + ], + "type": "string" + }, + "type": "array" + }, + "displayName": { + "description": "Required. The name of this intent.", + "type": "string" + }, + "events": { + "description": "Optional. The collection of event names that trigger the intent.\nIf the collection of input contexts is not empty, all of the contexts must\nbe present in the active user session for an event to trigger this intent.", + "items": { + "type": "string" + }, + "type": "array" + }, + "followupIntentInfo": { + "description": "Optional. Collection of information about all followup intents that have\nname of this intent as a root_name.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo" + }, + "type": "array" + }, + "inputContextNames": { + "description": "Optional. The list of context names required for this intent to be\ntriggered.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/-/contexts/\u003cContext ID\u003e`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "isFallback": { + "description": "Optional. Indicates whether this is a fallback intent.", + "type": "boolean" + }, + "messages": { + "description": "Optional. The collection of rich messages corresponding to the\n`Response` field in API.AI console.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessage" + }, + "type": "array" + }, + "mlDisabled": { + "description": "Optional. 
Indicates whether Machine Learning is disabled for the intent.\nNote: If `ml_disabled` setting is set to true, then this intent is not\ntaken into account during inference in `ML ONLY` match mode. Also,\nauto-markup in the UI is turned off.", + "type": "boolean" + }, + "mlEnabled": { + "description": "Optional. Indicates whether Machine Learning is enabled for the intent.\nNote: If `ml_enabled` setting is set to false, then this intent is not\ntaken into account during inference in `ML ONLY` match mode. Also,\nauto-markup in the UI is turned off.\nDEPRECATED! Please use `ml_disabled` field instead.\nNOTE: If neither `ml_enabled` nor `ml_disabled` field is set, then the\ndefault value is determined as follows:\n- Before April 15th, 2018 the default is:\n ml_enabled = false / ml_disabled = true.\n- After April 15th, 2018 the default is:\n ml_enabled = true / ml_disabled = false.", + "type": "boolean" + }, + "name": { + "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of this intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "outputContexts": { + "description": "Optional. The collection of contexts that are activated when the intent\nis matched. Context messages in this collection should not set the\nparameters field. Setting the `lifespan_count` to 0 will reset the context\nwhen the intent is matched.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/-/contexts/\u003cContext ID\u003e`.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1Context" + }, + "type": "array" + }, + "parameters": { + "description": "Optional. 
The collection of parameters associated with the intent.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentParameter" + }, + "type": "array" + }, + "parentFollowupIntentName": { + "description": "The unique identifier of the parent intent in the chain of followup\nintents.\nIt identifies the parent followup intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "priority": { + "description": "Optional. The priority of this intent. Higher numbers represent higher\npriorities. Zero or negative numbers mean that the intent is disabled.", + "format": "int32", + "type": "integer" + }, + "resetContexts": { + "description": "Optional. Indicates whether to delete all contexts in the current\nsession when this intent is matched.", + "type": "boolean" + }, + "rootFollowupIntentName": { + "description": "The unique identifier of the root intent in the chain of followup intents.\nIt identifies the correct followup intents chain for this intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "trainingPhrases": { + "description": "Optional. The collection of examples/templates that the agent is\ntrained on.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentTrainingPhrase" + }, + "type": "array" + }, + "webhookState": { + "description": "Required. Indicates whether webhooks are enabled for the intent.", + "enum": [ + "WEBHOOK_STATE_UNSPECIFIED", + "WEBHOOK_STATE_ENABLED", + "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING" + ], + "enumDescriptions": [ + "Webhook is disabled in the agent and in the intent.", + "Webhook is enabled in the agent and in the intent.", + "Webhook is enabled in the agent and in the intent. Also, each slot\nfilling prompt is forwarded to the webhook." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo": { + "description": "Represents a single followup intent in the chain.", + "id": "GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo", + "properties": { + "followupIntentName": { + "description": "The unique identifier of the followup intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + }, + "parentFollowupIntentName": { + "description": "The unique identifier of the followup intent parent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessage": { + "description": "Corresponds to the `Response` field in API.AI console.", + "id": "GoogleCloudDialogflowV2beta1IntentMessage", + "properties": { + "basicCard": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageBasicCard", + "description": "Displays a basic card for Actions on Google." + }, + "card": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageCard", + "description": "Displays a card." + }, + "carouselSelect": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect", + "description": "Displays a carousel card for Actions on Google." + }, + "image": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageImage", + "description": "Displays an image." + }, + "linkOutSuggestion": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion", + "description": "Displays a link out suggestion chip for Actions on Google." + }, + "listSelect": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageListSelect", + "description": "Displays a list card for Actions on Google." 
+ }, + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Returns a response containing a custom payload.", + "type": "object" + }, + "platform": { + "description": "Optional. The platform that this message is intended for.", + "enum": [ + "PLATFORM_UNSPECIFIED", + "FACEBOOK", + "SLACK", + "TELEGRAM", + "KIK", + "SKYPE", + "LINE", + "VIBER", + "ACTIONS_ON_GOOGLE" + ], + "enumDescriptions": [ + "Not specified.", + "Facebook.", + "Slack.", + "Telegram.", + "Kik.", + "Skype.", + "Line.", + "Viber.", + "Actions on Google." + ], + "type": "string" + }, + "quickReplies": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageQuickReplies", + "description": "Displays quick replies." + }, + "simpleResponses": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses", + "description": "Returns a voice or text-only response for Actions on Google." + }, + "suggestions": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageSuggestions", + "description": "Displays suggestion chips for Actions on Google." + }, + "text": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageText", + "description": "Returns a text response." + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageBasicCard": { + "description": "The basic card message. Useful for displaying information.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageBasicCard", + "properties": { + "buttons": { + "description": "Optional. The collection of card buttons.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton" + }, + "type": "array" + }, + "formattedText": { + "description": "Required, unless image is present. The body text of the card.", + "type": "string" + }, + "image": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageImage", + "description": "Optional. The image for the card." + }, + "subtitle": { + "description": "Optional. 
The subtitle of the card.", + "type": "string" + }, + "title": { + "description": "Optional. The title of the card.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton": { + "description": "The button object that appears at the bottom of a card.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton", + "properties": { + "openUriAction": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction", + "description": "Required. Action to take when a user taps on the button." + }, + "title": { + "description": "Required. The title of the button.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction": { + "description": "Opens the given URI.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction", + "properties": { + "uri": { + "description": "Required. The HTTP or HTTPS scheme URI.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageCard": { + "description": "The card response message.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageCard", + "properties": { + "buttons": { + "description": "Optional. The collection of card buttons.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageCardButton" + }, + "type": "array" + }, + "imageUri": { + "description": "Optional. The public URI to an image file for the card.", + "type": "string" + }, + "subtitle": { + "description": "Optional. The subtitle of the card.", + "type": "string" + }, + "title": { + "description": "Optional. The title of the card.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageCardButton": { + "description": "Optional. Contains information about a button.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageCardButton", + "properties": { + "postback": { + "description": "Optional. 
The text to send back to the Dialogflow API or a URI to\nopen.", + "type": "string" + }, + "text": { + "description": "Optional. The text to show on the button.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect": { + "description": "The card for presenting a carousel of options to select from.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect", + "properties": { + "items": { + "description": "Required. Carousel items.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem": { + "description": "An item in the carousel.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem", + "properties": { + "description": { + "description": "Optional. The body text of the card.", + "type": "string" + }, + "image": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageImage", + "description": "Optional. The image to display." + }, + "info": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo", + "description": "Required. Additional info about the option item." + }, + "title": { + "description": "Required. Title of the carousel item.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageImage": { + "description": "The image response message.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageImage", + "properties": { + "accessibilityText": { + "description": "Optional. A text description of the image to be used for accessibility,\ne.g., screen readers.", + "type": "string" + }, + "imageUri": { + "description": "Optional. 
The public URI to an image file.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion": { + "description": "The suggestion chip message that allows the user to jump out to the app\nor website associated with this agent.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion", + "properties": { + "destinationName": { + "description": "Required. The name of the app or site this chip is linking to.", + "type": "string" + }, + "uri": { + "description": "Required. The URI of the app or site to open when the user taps the\nsuggestion chip.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageListSelect": { + "description": "The card for presenting a list of options to select from.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageListSelect", + "properties": { + "items": { + "description": "Required. List items.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageListSelectItem" + }, + "type": "array" + }, + "title": { + "description": "Optional. The overall title of the list.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageListSelectItem": { + "description": "An item in the list.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageListSelectItem", + "properties": { + "description": { + "description": "Optional. The main text describing the item.", + "type": "string" + }, + "image": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageImage", + "description": "Optional. The image to display." + }, + "info": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo", + "description": "Required. Additional information about this option." + }, + "title": { + "description": "Required. 
The title of the list item.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageQuickReplies": { + "description": "The quick replies response message.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageQuickReplies", + "properties": { + "quickReplies": { + "description": "Optional. The collection of quick replies.", + "items": { + "type": "string" + }, + "type": "array" + }, + "title": { + "description": "Optional. The title of the collection of quick replies.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo": { + "description": "Additional info about the select item for when it is triggered in a\ndialog.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo", + "properties": { + "key": { + "description": "Required. A unique key that will be sent back to the agent if this\nresponse is given.", + "type": "string" + }, + "synonyms": { + "description": "Optional. A list of synonyms that can also be used to trigger this\nitem in dialog.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse": { + "description": "The simple response message containing speech or text.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse", + "properties": { + "displayText": { + "description": "Optional. The text to display.", + "type": "string" + }, + "ssml": { + "description": "One of text_to_speech or ssml must be provided. Structured spoken\nresponse to the user in the SSML format. Mutually exclusive with\ntext_to_speech.", + "type": "string" + }, + "textToSpeech": { + "description": "One of text_to_speech or ssml must be provided. The plain text of the\nspeech output. 
Mutually exclusive with ssml.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses": { + "description": "The collection of simple response candidates.\nThis message in `QueryResult.fulfillment_messages` and\n`WebhookResponse.fulfillment_messages` should contain only one\n`SimpleResponse`.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses", + "properties": { + "simpleResponses": { + "description": "Required. The list of simple responses.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageSuggestion": { + "description": "The suggestion chip message that the user can tap to quickly post a reply\nto the conversation.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageSuggestion", + "properties": { + "title": { + "description": "Required. The text shown the in the suggestion chip.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageSuggestions": { + "description": "The collection of suggestions.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageSuggestions", + "properties": { + "suggestions": { + "description": "Required. The list of suggested replies.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessageSuggestion" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentMessageText": { + "description": "The text response message.", + "id": "GoogleCloudDialogflowV2beta1IntentMessageText", + "properties": { + "text": { + "description": "Optional. 
The collection of the agent's responses.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentParameter": { + "description": "Represents intent parameters.", + "id": "GoogleCloudDialogflowV2beta1IntentParameter", + "properties": { + "defaultValue": { + "description": "Optional. The default value to use when the `value` yields an empty\nresult.\nDefault values can be extracted from contexts by using the following\nsyntax: `#context_name.parameter_name`.", + "type": "string" + }, + "displayName": { + "description": "Required. The name of the parameter.", + "type": "string" + }, + "entityTypeDisplayName": { + "description": "Optional. The name of the entity type, prefixed with `@`, that\ndescribes values of the parameter. If the parameter is\nrequired, this must be provided.", + "type": "string" + }, + "isList": { + "description": "Optional. Indicates whether the parameter represents a list of values.", + "type": "boolean" + }, + "mandatory": { + "description": "Optional. Indicates whether the parameter is required. That is,\nwhether the intent cannot be completed without collecting the parameter\nvalue.", + "type": "boolean" + }, + "name": { + "description": "The unique identifier of this parameter.", + "type": "string" + }, + "prompts": { + "description": "Optional. The collection of prompts that the agent can present to the\nuser in order to collect value for the parameter.", + "items": { + "type": "string" + }, + "type": "array" + }, + "value": { + "description": "Optional. The definition of the parameter value. 
It can be:\n- a constant string,\n- a parameter value defined as `$parameter_name`,\n- an original parameter value defined as `$parameter_name.original`,\n- a parameter value from some context defined as\n `#context_name.parameter_name`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentTrainingPhrase": { + "description": "Represents an example or template that the agent is trained on.", + "id": "GoogleCloudDialogflowV2beta1IntentTrainingPhrase", + "properties": { + "name": { + "description": "Required. The unique identifier of this training phrase.", + "type": "string" + }, + "parts": { + "description": "Required. The collection of training phrase parts (can be annotated).\nFields: `entity_type`, `alias` and `user_defined` should be populated\nonly for the annotated parts of the training phrase.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart" + }, + "type": "array" + }, + "timesAddedCount": { + "description": "Optional. Indicates how many times this example or template was added to\nthe intent. Each time a developer adds an existing sample by editing an\nintent or training, this counter is increased.", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "Required. The type of the training phrase.", + "enum": [ + "TYPE_UNSPECIFIED", + "EXAMPLE", + "TEMPLATE" + ], + "enumDescriptions": [ + "Not specified. This value should never be used.", + "Examples do not contain @-prefixed entity type names, but example parts\ncan be annotated with entity types.", + "Templates are not annotated with entity types, but they can contain\n@-prefixed entity type names as substrings." + ], + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart": { + "description": "Represents a part of a training phrase.", + "id": "GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart", + "properties": { + "alias": { + "description": "Optional. 
The parameter name for the value extracted from the\nannotated part of the example.", + "type": "string" + }, + "entityType": { + "description": "Optional. The entity type name prefixed with `@`. This field is\nrequired for the annotated part of the text and applies only to\nexamples.", + "type": "string" + }, + "text": { + "description": "Required. The text corresponding to the example or template,\nif there are no annotations. For\nannotated examples, it is the text for one of the example's parts.", + "type": "string" + }, + "userDefined": { + "description": "Optional. Indicates whether the text was manually annotated by the\ndeveloper.", + "type": "boolean" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest": { + "description": "Represents the contents of the original request that was passed to\nthe `[Streaming]DetectIntent` call.", + "id": "GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest", + "properties": { + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. This field is set to the value of `QueryParameters.payload` field\npassed in the request.", + "type": "object" + }, + "source": { + "description": "The source of this request, e.g., `google`, `facebook`, `slack`. It is set\nby Dialogflow-owned servers. 
Possible values of this field correspond to\nIntent.Message.Platform.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1QueryResult": { + "description": "Represents the result of conversational query or event processing.", + "id": "GoogleCloudDialogflowV2beta1QueryResult", + "properties": { + "action": { + "description": "The action name from the matched intent.", + "type": "string" + }, + "allRequiredParamsPresent": { + "description": "This field is set to:\n- `false` if the matched intent has required parameters and not all of\n the required parameter values have been collected.\n- `true` if all required parameter values have been collected, or if the\n matched intent doesn't contain any required parameters.", + "type": "boolean" + }, + "diagnosticInfo": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The free-form diagnostic info. For example, this field\ncould contain webhook call latency.", + "type": "object" + }, + "fulfillmentMessages": { + "description": "The collection of rich messages to present to the user.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessage" + }, + "type": "array" + }, + "fulfillmentText": { + "description": "The text to be pronounced to the user or shown on the screen.", + "type": "string" + }, + "intent": { + "$ref": "GoogleCloudDialogflowV2beta1Intent", + "description": "The intent that matched the conversational query. Some, not\nall fields are filled in this message, including but not limited to:\n`name`, `display_name` and `webhook_state`." + }, + "intentDetectionConfidence": { + "description": "The intent detection confidence. 
Values range from 0.0\n(completely uncertain) to 1.0 (completely certain).", + "format": "float", + "type": "number" + }, + "languageCode": { + "description": "The language that was triggered during intent detection.\nSee [Language Support](https://dialogflow.com/docs/reference/language)\nfor a list of the currently supported language codes.", + "type": "string" + }, + "outputContexts": { + "description": "The collection of output contexts. If applicable,\n`output_contexts.parameters` contains entries with name\n`\u003cparameter name\u003e.original` containing the original parameter values\nbefore the query.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1Context" + }, + "type": "array" + }, + "parameters": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "The collection of extracted parameters.", + "type": "object" + }, + "queryText": { + "description": "The original conversational query text:\n- If natural language text was provided as input, `query_text` contains\n a copy of the input.\n- If natural language speech audio was provided as input, `query_text`\n contains the speech recognition result. If speech recognizer produced\n multiple alternatives, a particular one is picked.\n- If an event was provided as input, `query_text` is not set.", + "type": "string" + }, + "speechRecognitionConfidence": { + "description": "The Speech recognition confidence between 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that the recognized words are\ncorrect. The default of 0.0 is a sentinel value indicating that confidence\nwas not set.\n\nYou should not rely on this field as it isn't guaranteed to be accurate, or\neven set. 
In particular this field isn't set in Webhook calls and for\nStreamingDetectIntent since the streaming endpoint has separate confidence\nestimates per portion of the audio in StreamingRecognitionResult.", + "format": "float", + "type": "number" + }, + "webhookPayload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "If the query was fulfilled by a webhook call, this field is set to the\nvalue of the `payload` field returned in the webhook response.", + "type": "object" + }, + "webhookSource": { + "description": "If the query was fulfilled by a webhook call, this field is set to the\nvalue of the `source` field returned in the webhook response.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1WebhookRequest": { + "description": "The request message for a webhook call.", + "id": "GoogleCloudDialogflowV2beta1WebhookRequest", + "properties": { + "originalDetectIntentRequest": { + "$ref": "GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest", + "description": "Optional. The contents of the original request that was passed to\n`[Streaming]DetectIntent` call." + }, + "queryResult": { + "$ref": "GoogleCloudDialogflowV2beta1QueryResult", + "description": "The result of the conversational query or event processing. Contains the\nsame value as `[Streaming]DetectIntentResponse.query_result`." + }, + "responseId": { + "description": "The unique identifier of the response. 
Contains the same value as\n`[Streaming]DetectIntentResponse.response_id`.", + "type": "string" + }, + "session": { + "description": "The unique identifier of detectIntent request session.\nCan be used to identify end-user inside webhook implementation.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleCloudDialogflowV2beta1WebhookResponse": { + "description": "The response message for a webhook call.", + "id": "GoogleCloudDialogflowV2beta1WebhookResponse", + "properties": { + "followupEventInput": { + "$ref": "GoogleCloudDialogflowV2beta1EventInput", + "description": "Optional. Makes the platform immediately invoke another `DetectIntent` call\ninternally with the specified event as input." + }, + "fulfillmentMessages": { + "description": "Optional. The collection of rich messages to present to the user. This\nvalue is passed directly to `QueryResult.fulfillment_messages`.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1IntentMessage" + }, + "type": "array" + }, + "fulfillmentText": { + "description": "Optional. The text to be shown on the screen. This value is passed directly\nto `QueryResult.fulfillment_text`.", + "type": "string" + }, + "outputContexts": { + "description": "Optional. The collection of output contexts. This value is passed directly\nto `QueryResult.output_contexts`.", + "items": { + "$ref": "GoogleCloudDialogflowV2beta1Context" + }, + "type": "array" + }, + "payload": { + "additionalProperties": { + "description": "Properties of the object.", + "type": "any" + }, + "description": "Optional. This value is passed directly to `QueryResult.webhook_payload`.", + "type": "object" + }, + "source": { + "description": "Optional. 
This value is passed directly to `QueryResult.webhook_source`.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", + "id": "GoogleLongrunningOperation", + "properties": { + "done": { + "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", + "type": "boolean" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. 
For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", + "type": "object" + } + }, + "type": "object" + }, + "GoogleProtobufEmpty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "id": "GoogleProtobufEmpty", + "properties": {}, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. 
For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "id": "GoogleRpcStatus", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. 
Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleTypeLatLng": { + "description": "An object representing a latitude/longitude pair. This is expressed as a pair\nof doubles representing degrees latitude and degrees longitude. Unless\nspecified otherwise, this must conform to the\n\u003ca href=\"http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf\"\u003eWGS84\nstandard\u003c/a\u003e. Values must be within normalized ranges.", + "id": "GoogleTypeLatLng", + "properties": { + "latitude": { + "description": "The latitude in degrees. It must be in the range [-90.0, +90.0].", + "format": "double", + "type": "number" + }, + "longitude": { + "description": "The longitude in degrees. It must be in the range [-180.0, +180.0].", + "format": "double", + "type": "number" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "Dialogflow API", + "version": "v2", + "version_module": true +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/dialogflow/v2/dialogflow-gen.go b/vendor/google.golang.org/api/dialogflow/v2/dialogflow-gen.go new file mode 100644 index 000000000..4fe025796 --- /dev/null +++ b/vendor/google.golang.org/api/dialogflow/v2/dialogflow-gen.go @@ -0,0 +1,12293 @@ +// Package dialogflow provides access to the Dialogflow API. +// +// See https://cloud.google.com/dialogflow-enterprise/ +// +// Usage example: +// +// import "google.golang.org/api/dialogflow/v2" +// ... 
+// dialogflowService, err := dialogflow.New(oauthHttpClient) +package dialogflow // import "google.golang.org/api/dialogflow/v2" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "dialogflow:v2" +const apiName = "dialogflow" +const apiVersion = "v2" +const basePath = "https://dialogflow.googleapis.com/" + +// OAuth2 scopes used by this API. +const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.Agent = NewProjectsAgentService(s) + rs.Operations = NewProjectsOperationsService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Agent *ProjectsAgentService + + Operations 
*ProjectsOperationsService +} + +func NewProjectsAgentService(s *Service) *ProjectsAgentService { + rs := &ProjectsAgentService{s: s} + rs.EntityTypes = NewProjectsAgentEntityTypesService(s) + rs.Intents = NewProjectsAgentIntentsService(s) + rs.Runtimes = NewProjectsAgentRuntimesService(s) + rs.Sessions = NewProjectsAgentSessionsService(s) + return rs +} + +type ProjectsAgentService struct { + s *Service + + EntityTypes *ProjectsAgentEntityTypesService + + Intents *ProjectsAgentIntentsService + + Runtimes *ProjectsAgentRuntimesService + + Sessions *ProjectsAgentSessionsService +} + +func NewProjectsAgentEntityTypesService(s *Service) *ProjectsAgentEntityTypesService { + rs := &ProjectsAgentEntityTypesService{s: s} + rs.Entities = NewProjectsAgentEntityTypesEntitiesService(s) + return rs +} + +type ProjectsAgentEntityTypesService struct { + s *Service + + Entities *ProjectsAgentEntityTypesEntitiesService +} + +func NewProjectsAgentEntityTypesEntitiesService(s *Service) *ProjectsAgentEntityTypesEntitiesService { + rs := &ProjectsAgentEntityTypesEntitiesService{s: s} + return rs +} + +type ProjectsAgentEntityTypesEntitiesService struct { + s *Service +} + +func NewProjectsAgentIntentsService(s *Service) *ProjectsAgentIntentsService { + rs := &ProjectsAgentIntentsService{s: s} + return rs +} + +type ProjectsAgentIntentsService struct { + s *Service +} + +func NewProjectsAgentRuntimesService(s *Service) *ProjectsAgentRuntimesService { + rs := &ProjectsAgentRuntimesService{s: s} + rs.Sessions = NewProjectsAgentRuntimesSessionsService(s) + return rs +} + +type ProjectsAgentRuntimesService struct { + s *Service + + Sessions *ProjectsAgentRuntimesSessionsService +} + +func NewProjectsAgentRuntimesSessionsService(s *Service) *ProjectsAgentRuntimesSessionsService { + rs := &ProjectsAgentRuntimesSessionsService{s: s} + rs.Contexts = NewProjectsAgentRuntimesSessionsContextsService(s) + rs.EntityTypes = NewProjectsAgentRuntimesSessionsEntityTypesService(s) + return rs +} + +type 
ProjectsAgentRuntimesSessionsService struct { + s *Service + + Contexts *ProjectsAgentRuntimesSessionsContextsService + + EntityTypes *ProjectsAgentRuntimesSessionsEntityTypesService +} + +func NewProjectsAgentRuntimesSessionsContextsService(s *Service) *ProjectsAgentRuntimesSessionsContextsService { + rs := &ProjectsAgentRuntimesSessionsContextsService{s: s} + return rs +} + +type ProjectsAgentRuntimesSessionsContextsService struct { + s *Service +} + +func NewProjectsAgentRuntimesSessionsEntityTypesService(s *Service) *ProjectsAgentRuntimesSessionsEntityTypesService { + rs := &ProjectsAgentRuntimesSessionsEntityTypesService{s: s} + return rs +} + +type ProjectsAgentRuntimesSessionsEntityTypesService struct { + s *Service +} + +func NewProjectsAgentSessionsService(s *Service) *ProjectsAgentSessionsService { + rs := &ProjectsAgentSessionsService{s: s} + rs.Contexts = NewProjectsAgentSessionsContextsService(s) + rs.EntityTypes = NewProjectsAgentSessionsEntityTypesService(s) + return rs +} + +type ProjectsAgentSessionsService struct { + s *Service + + Contexts *ProjectsAgentSessionsContextsService + + EntityTypes *ProjectsAgentSessionsEntityTypesService +} + +func NewProjectsAgentSessionsContextsService(s *Service) *ProjectsAgentSessionsContextsService { + rs := &ProjectsAgentSessionsContextsService{s: s} + return rs +} + +type ProjectsAgentSessionsContextsService struct { + s *Service +} + +func NewProjectsAgentSessionsEntityTypesService(s *Service) *ProjectsAgentSessionsEntityTypesService { + rs := &ProjectsAgentSessionsEntityTypesService{s: s} + return rs +} + +type ProjectsAgentSessionsEntityTypesService struct { + s *Service +} + +func NewProjectsOperationsService(s *Service) *ProjectsOperationsService { + rs := &ProjectsOperationsService{s: s} + return rs +} + +type ProjectsOperationsService struct { + s *Service +} + +// GoogleCloudDialogflowV2Agent: Represents a conversational agent. +type GoogleCloudDialogflowV2Agent struct { + // AvatarUri: Optional. 
The URI of the agent's avatar. + // Avatars are used throughout API.AI console and in the + // self-hosted + // [Web Demo](https://dialogflow.com/docs/integrations/web-demo) + // integration. + AvatarUri string `json:"avatarUri,omitempty"` + + // ClassificationThreshold: Optional. To filter out false positive + // results and still get variety in + // matched natural language inputs for your agent, you can tune the + // machine + // learning classification threshold. If the returned score value is + // less than + // the threshold value, then a fallback intent is be triggered or, if + // there + // are no fallback intents defined, no intent will be triggered. The + // score + // values range from 0.0 (completely uncertain) to 1.0 (completely + // certain). + // If set to 0.0, the default of 0.3 is used. + ClassificationThreshold float64 `json:"classificationThreshold,omitempty"` + + // DefaultLanguageCode: Required. The default language of the agent as a + // language tag. See + // [Language Support](https://dialogflow.com/docs/reference/language) + // for a + // list of the currently supported language codes. + // This field cannot be set by the `Update` method. + DefaultLanguageCode string `json:"defaultLanguageCode,omitempty"` + + // Description: Optional. The description of this agent. + // The maximum length is 500 characters. If exceeded, the request is + // rejected. + Description string `json:"description,omitempty"` + + // DisplayName: Required. The name of this agent. + DisplayName string `json:"displayName,omitempty"` + + // EnableLogging: Optional. Determines whether this agent should log + // conversation queries. + EnableLogging bool `json:"enableLogging,omitempty"` + + // MatchMode: Optional. Determines how intents are detected from user + // queries. + // + // Possible values: + // "MATCH_MODE_UNSPECIFIED" - Not specified. 
+ // "MATCH_MODE_HYBRID" - Best for agents with a small number of + // examples in intents and/or wide + // use of templates syntax and composite entities. + // "MATCH_MODE_ML_ONLY" - Can be used for agents with a large number + // of examples in intents, + // especially the ones using @sys.any or very large developer entities. + MatchMode string `json:"matchMode,omitempty"` + + // Parent: Required. The project of this agent. + // Format: `projects/`. + Parent string `json:"parent,omitempty"` + + // SupportedLanguageCodes: Optional. The list of all languages supported + // by this agent (except for the + // `default_language_code`). + SupportedLanguageCodes []string `json:"supportedLanguageCodes,omitempty"` + + // TimeZone: Required. The time zone of this agent from the + // [time zone database](https://www.iana.org/time-zones), + // e.g., + // America/New_York, Europe/Paris. + TimeZone string `json:"timeZone,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AvatarUri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AvatarUri") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2Agent) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2Agent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2Agent) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2Agent + var s1 struct { + ClassificationThreshold gensupport.JSONFloat64 `json:"classificationThreshold"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.ClassificationThreshold = float64(s1.ClassificationThreshold) + return nil +} + +// GoogleCloudDialogflowV2BatchCreateEntitiesRequest: The request +// message for EntityTypes.BatchCreateEntities. +type GoogleCloudDialogflowV2BatchCreateEntitiesRequest struct { + // Entities: Required. The collection of entities to create. + Entities []*GoogleCloudDialogflowV2EntityTypeEntity `json:"entities,omitempty"` + + // LanguageCode: Optional. The language of entity synonyms defined in + // `entities`. If not + // specified, the agent's default language is used. + // [More than a + // dozen + // languages](https://dialogflow.com/docs/reference/language) are + // supported. + // Note: languages must be enabled in the agent, before they can be + // used. + LanguageCode string `json:"languageCode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entities") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entities") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchCreateEntitiesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchCreateEntitiesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchDeleteEntitiesRequest: The request +// message for EntityTypes.BatchDeleteEntities. +type GoogleCloudDialogflowV2BatchDeleteEntitiesRequest struct { + // EntityValues: Required. The canonical `values` of the entities to + // delete. Note that + // these are not fully-qualified names, i.e. they don't start + // with + // `projects/`. + EntityValues []string `json:"entityValues,omitempty"` + + // LanguageCode: Optional. The language of entity synonyms defined in + // `entities`. If not + // specified, the agent's default language is used. + // [More than a + // dozen + // languages](https://dialogflow.com/docs/reference/language) are + // supported. + // Note: languages must be enabled in the agent, before they can be + // used. + LanguageCode string `json:"languageCode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityValues") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"EntityValues") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchDeleteEntitiesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchDeleteEntitiesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest: The request +// message for EntityTypes.BatchDeleteEntityTypes. +type GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest struct { + // EntityTypeNames: Required. The names entity types to delete. All + // names must point to the + // same agent as `parent`. + EntityTypeNames []string `json:"entityTypeNames,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityTypeNames") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityTypeNames") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchDeleteIntentsRequest: The request message +// for Intents.BatchDeleteIntents. +type GoogleCloudDialogflowV2BatchDeleteIntentsRequest struct { + // Intents: Required. The collection of intents to delete. Only intent + // `name` must be + // filled in. + Intents []*GoogleCloudDialogflowV2Intent `json:"intents,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Intents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Intents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchDeleteIntentsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchDeleteIntentsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchUpdateEntitiesRequest: The response +// message for EntityTypes.BatchCreateEntities. 
+type GoogleCloudDialogflowV2BatchUpdateEntitiesRequest struct { + // Entities: Required. The collection of new entities to replace the + // existing entities. + Entities []*GoogleCloudDialogflowV2EntityTypeEntity `json:"entities,omitempty"` + + // LanguageCode: Optional. The language of entity synonyms defined in + // `entities`. If not + // specified, the agent's default language is used. + // [More than a + // dozen + // languages](https://dialogflow.com/docs/reference/language) are + // supported. + // Note: languages must be enabled in the agent, before they can be + // used. + LanguageCode string `json:"languageCode,omitempty"` + + // UpdateMask: Optional. The mask to control which fields get updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entities") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entities") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchUpdateEntitiesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchUpdateEntitiesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest: The request +// message for EntityTypes.BatchUpdateEntityTypes. +type GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest struct { + // EntityTypeBatchInline: The collection of entity type to update or + // create. + EntityTypeBatchInline *GoogleCloudDialogflowV2EntityTypeBatch `json:"entityTypeBatchInline,omitempty"` + + // EntityTypeBatchUri: The URI to a Google Cloud Storage file containing + // entity types to update + // or create. The file format can either be a serialized proto + // (of + // EntityBatch type) or a JSON object. Note: The URI must start + // with + // "gs://". + EntityTypeBatchUri string `json:"entityTypeBatchUri,omitempty"` + + // LanguageCode: Optional. The language of entity synonyms defined in + // `entity_types`. If not + // specified, the agent's default language is used. + // [More than a + // dozen + // languages](https://dialogflow.com/docs/reference/language) are + // supported. + // Note: languages must be enabled in the agent, before they can be + // used. + LanguageCode string `json:"languageCode,omitempty"` + + // UpdateMask: Optional. The mask to control which fields get updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EntityTypeBatchInline") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityTypeBatchInline") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchUpdateEntityTypesResponse: The response +// message for EntityTypes.BatchUpdateEntityTypes. +type GoogleCloudDialogflowV2BatchUpdateEntityTypesResponse struct { + // EntityTypes: The collection of updated or created entity types. + EntityTypes []*GoogleCloudDialogflowV2EntityType `json:"entityTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchUpdateEntityTypesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchUpdateEntityTypesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchUpdateIntentsRequest: The request message +// for Intents.BatchUpdateIntents. +type GoogleCloudDialogflowV2BatchUpdateIntentsRequest struct { + // IntentBatchInline: The collection of intents to update or create. + IntentBatchInline *GoogleCloudDialogflowV2IntentBatch `json:"intentBatchInline,omitempty"` + + // IntentBatchUri: The URI to a Google Cloud Storage file containing + // intents to update or + // create. The file format can either be a serialized proto (of + // IntentBatch + // type) or JSON object. Note: The URI must start with "gs://". + IntentBatchUri string `json:"intentBatchUri,omitempty"` + + // IntentView: Optional. The resource view to apply to the returned + // intent. + // + // Possible values: + // "INTENT_VIEW_UNSPECIFIED" - Training phrases field is not populated + // in the response. + // "INTENT_VIEW_FULL" - All fields are populated. + IntentView string `json:"intentView,omitempty"` + + // LanguageCode: Optional. The language of training phrases, parameters + // and rich messages + // defined in `intents`. If not specified, the agent's default language + // is + // used. [More than a + // dozen + // languages](https://dialogflow.com/docs/reference/language) are + // supported. + // Note: languages must be enabled in the agent, before they can be + // used. + LanguageCode string `json:"languageCode,omitempty"` + + // UpdateMask: Optional. The mask to control which fields get updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"IntentBatchInline") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IntentBatchInline") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchUpdateIntentsRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchUpdateIntentsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2BatchUpdateIntentsResponse: The response +// message for Intents.BatchUpdateIntents. +type GoogleCloudDialogflowV2BatchUpdateIntentsResponse struct { + // Intents: The collection of updated or created intents. + Intents []*GoogleCloudDialogflowV2Intent `json:"intents,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Intents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Intents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2BatchUpdateIntentsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2BatchUpdateIntentsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2Context: Represents a context. +type GoogleCloudDialogflowV2Context struct { + // LifespanCount: Optional. The number of conversational query requests + // after which the + // context expires. If set to `0` (the default) the context + // expires + // immediately. Contexts expire automatically after 10 minutes even if + // there + // are no matching queries. + LifespanCount int64 `json:"lifespanCount,omitempty"` + + // Name: Required. The unique identifier of the context. + // Format: + // `projects//agent/sessions//contexts/`, + // or + // `projects//agent/runtimes//sessions//contexts/`. + // Note: Runtimes are under construction and will be available soon. + // The Context ID is always converted to lowercase. + // If is not specified, we assume default 'sandbox' + // runtime. + Name string `json:"name,omitempty"` + + // Parameters: Optional. The collection of parameters associated with + // this context. + // Refer to [this + // doc](https://dialogflow.com/docs/actions-and-parameters) for + // syntax. + Parameters googleapi.RawMessage `json:"parameters,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"LifespanCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LifespanCount") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2Context) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2Context + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2DetectIntentRequest: The request to detect +// user's intent. +type GoogleCloudDialogflowV2DetectIntentRequest struct { + // InputAudio: Optional. The natural language speech audio to be + // processed. This field + // should be populated iff `query_input` is set to an input audio + // config. + // A single request can contain up to 1 minute of speech audio data. + InputAudio string `json:"inputAudio,omitempty"` + + // QueryInput: Required. The input specification. It can be set to: + // + // 1. an audio config + // which instructs the speech recognizer how to process the speech + // audio, + // + // 2. a conversational query in the form of text, or + // + // 3. an event that specifies which intent to trigger. + QueryInput *GoogleCloudDialogflowV2QueryInput `json:"queryInput,omitempty"` + + // QueryParams: Optional. The parameters of this query. 
+ QueryParams *GoogleCloudDialogflowV2QueryParameters `json:"queryParams,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InputAudio") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InputAudio") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2DetectIntentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2DetectIntentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2DetectIntentResponse: The message returned +// from the DetectIntent method. +type GoogleCloudDialogflowV2DetectIntentResponse struct { + // QueryResult: The results of the conversational query or event + // processing. + QueryResult *GoogleCloudDialogflowV2QueryResult `json:"queryResult,omitempty"` + + // ResponseId: The unique identifier of the response. It can be used + // to + // locate a response in the training example set or for reporting + // issues. + ResponseId string `json:"responseId,omitempty"` + + // WebhookStatus: Specifies the status of the webhook request. + // `webhook_status` + // is never populated in webhook requests. 
+ WebhookStatus *GoogleRpcStatus `json:"webhookStatus,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "QueryResult") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QueryResult") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2DetectIntentResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2DetectIntentResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2EntityType: Represents an entity type. +// Entity types serve as a tool for extracting parameter values from +// natural +// language queries. +type GoogleCloudDialogflowV2EntityType struct { + // AutoExpansionMode: Optional. Indicates whether the entity type can be + // automatically + // expanded. + // + // Possible values: + // "AUTO_EXPANSION_MODE_UNSPECIFIED" - Auto expansion disabled for the + // entity. + // "AUTO_EXPANSION_MODE_DEFAULT" - Allows an agent to recognize values + // that have not been explicitly + // listed in the entity. 
+ AutoExpansionMode string `json:"autoExpansionMode,omitempty"` + + // DisplayName: Required. The name of the entity. + DisplayName string `json:"displayName,omitempty"` + + // Entities: Optional. The collection of entities associated with the + // entity type. + Entities []*GoogleCloudDialogflowV2EntityTypeEntity `json:"entities,omitempty"` + + // Kind: Required. Indicates the kind of entity type. + // + // Possible values: + // "KIND_UNSPECIFIED" - Not specified. This value should be never + // used. + // "KIND_MAP" - Map entity types allow mapping of a group of synonyms + // to a canonical + // value. + // "KIND_LIST" - List entity types contain a set of entries that do + // not map to canonical + // values. However, list entity types can contain references to other + // entity + // types (with or without aliases). + Kind string `json:"kind,omitempty"` + + // Name: Required for all methods except `create` (`create` populates + // the name + // automatically. + // The unique identifier of the entity type. Format: + // `projects//agent/entityTypes/`. + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AutoExpansionMode") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoExpansionMode") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2EntityType) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2EntityType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2EntityTypeBatch: This message is a wrapper +// around a collection of entity types. +type GoogleCloudDialogflowV2EntityTypeBatch struct { + // EntityTypes: A collection of entity types. + EntityTypes []*GoogleCloudDialogflowV2EntityType `json:"entityTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2EntityTypeBatch) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2EntityTypeBatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2EntityTypeEntity: Optional. Represents an +// entity. +type GoogleCloudDialogflowV2EntityTypeEntity struct { + // Synonyms: Required. A collection of synonyms. For `KIND_LIST` entity + // types this + // must contain exactly one synonym equal to `value`. + Synonyms []string `json:"synonyms,omitempty"` + + // Value: Required. + // For `KIND_MAP` entity types: + // A canonical name to be used in place of synonyms. + // For `KIND_LIST` entity types: + // A string that can contain references to other entity types (with + // or + // without aliases). + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Synonyms") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Synonyms") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2EntityTypeEntity) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2EntityTypeEntity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2EventInput: Events allow for matching intents +// by event name instead of the natural +// language input. For instance, input `` can trigger a personalized +// welcome response. +// The parameter `name` may be used by the agent in the +// response: +// `“Hello #welcome_event.name! What can I do for you today?”`. +type GoogleCloudDialogflowV2EventInput struct { + // LanguageCode: Required. The language of this query. See + // [Language + // Support](https://dialogflow.com/docs/languages) for a list of + // the + // currently supported language codes. Note that queries in the same + // session + // do not necessarily need to specify the same language. + LanguageCode string `json:"languageCode,omitempty"` + + // Name: Required. The unique identifier of the event. + Name string `json:"name,omitempty"` + + // Parameters: Optional. The collection of parameters associated with + // the event. + Parameters googleapi.RawMessage `json:"parameters,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LanguageCode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LanguageCode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2EventInput) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2EventInput + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ExportAgentRequest: The request message for +// Agents.ExportAgent. +type GoogleCloudDialogflowV2ExportAgentRequest struct { + // AgentUri: Optional. The Google Cloud Storage URI to export the agent + // to. + // Note: The URI must start with + // "gs://". If left unspecified, the serialized agent is returned + // inline. + AgentUri string `json:"agentUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AgentUri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AgentUri") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ExportAgentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ExportAgentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ExportAgentResponse: The response message for +// Agents.ExportAgent. +type GoogleCloudDialogflowV2ExportAgentResponse struct { + // AgentContent: The exported agent. + // + // Example for how to export an agent to a zip file via a command + // line: + // + // curl \ + // + // 'https://dialogflow.googleapis.com/v2/projects//agent:ex + // port'\ + // -X POST \ + // -H 'Authorization: Bearer '$(gcloud auth print-access-token) \ + // -H 'Accept: application/json' \ + // -H 'Content-Type: application/json' \ + // --compressed \ + // --data-binary '{}' \ + // | grep agentContent | sed -e 's/.*"agentContent": "\([^"]*\)".*/\1/' + // \ + // | base64 --decode > + AgentContent string `json:"agentContent,omitempty"` + + // AgentUri: The URI to a file containing the exported agent. This field + // is populated + // only if `agent_uri` is specified in `ExportAgentRequest`. + AgentUri string `json:"agentUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AgentContent") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AgentContent") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ExportAgentResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ExportAgentResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ImportAgentRequest: The request message for +// Agents.ImportAgent. +type GoogleCloudDialogflowV2ImportAgentRequest struct { + // AgentContent: The agent to import. + // + // Example for how to import an agent via the command line: + // + // curl \ + // + // 'https://dialogflow.googleapis.com/v2/projects//agent:im + // port\ + // -X POST \ + // -H 'Authorization: Bearer '$(gcloud auth print-access-token) \ + // -H 'Accept: application/json' \ + // -H 'Content-Type: application/json' \ + // --compressed \ + // --data-binary "{ + // 'agentContent': '$(cat | base64 -w 0)' + // }" + AgentContent string `json:"agentContent,omitempty"` + + // AgentUri: The URI to a Google Cloud Storage file containing the agent + // to import. + // Note: The URI must start with "gs://". + AgentUri string `json:"agentUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AgentContent") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AgentContent") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ImportAgentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ImportAgentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2InputAudioConfig: Instructs the speech +// recognizer how to process the audio content. +type GoogleCloudDialogflowV2InputAudioConfig struct { + // AudioEncoding: Required. Audio encoding of the audio content to + // process. + // + // Possible values: + // "AUDIO_ENCODING_UNSPECIFIED" - Not specified. + // "AUDIO_ENCODING_LINEAR_16" - Uncompressed 16-bit signed + // little-endian samples (Linear PCM). + // "AUDIO_ENCODING_FLAC" - + // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless + // Audio + // Codec) is the recommended encoding because it is lossless + // (therefore + // recognition is not compromised) and requires only about half + // the + // bandwidth of `LINEAR16`. `FLAC` stream encoding supports 16-bit + // and + // 24-bit samples, however, not all fields in `STREAMINFO` are + // supported. + // "AUDIO_ENCODING_MULAW" - 8-bit samples that compand 14-bit audio + // samples using G.711 PCMU/mu-law. + // "AUDIO_ENCODING_AMR" - Adaptive Multi-Rate Narrowband codec. + // `sample_rate_hertz` must be 8000. + // "AUDIO_ENCODING_AMR_WB" - Adaptive Multi-Rate Wideband codec. + // `sample_rate_hertz` must be 16000. + // "AUDIO_ENCODING_OGG_OPUS" - Opus encoded audio frames in Ogg + // container + // ([OggOpus](https://wiki.xiph.org/OggOpus)). + // `sample_rate_her + // tz` must be 16000. 
+ // "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE" - Although the use of lossy + // encodings is not recommended, if a very low + // bitrate encoding is required, `OGG_OPUS` is highly preferred + // over + // Speex encoding. The [Speex](https://speex.org/) encoding supported + // by + // Dialogflow API has a header byte in each block, as in MIME + // type + // `audio/x-speex-with-header-byte`. + // It is a variant of the RTP Speex encoding defined in + // [RFC 5574](https://tools.ietf.org/html/rfc5574). + // The stream is a sequence of blocks, one block per RTP packet. Each + // block + // starts with a byte containing the length of the block, in bytes, + // followed + // by one or more frames of Speex data, padded to an integral number + // of + // bytes (octets) as specified in RFC 5574. In other words, each RTP + // header + // is replaced with a single byte containing the block length. Only + // Speex + // wideband is supported. `sample_rate_hertz` must be 16000. + AudioEncoding string `json:"audioEncoding,omitempty"` + + // LanguageCode: Required. The language of the supplied audio. + // Dialogflow does not do + // translations. See + // [Language + // Support](https://dialogflow.com/docs/languages) for a list of + // the + // currently supported language codes. Note that queries in the same + // session + // do not necessarily need to specify the same language. + LanguageCode string `json:"languageCode,omitempty"` + + // PhraseHints: Optional. The collection of phrase hints which are used + // to boost accuracy + // of speech recognition. + // Refer to [Cloud Speech API + // documentation](/speech/docs/basics#phrase-hints) + // for more details. + PhraseHints []string `json:"phraseHints,omitempty"` + + // SampleRateHertz: Required. Sample rate (in Hertz) of the audio + // content sent in the query. + // Refer to [Cloud Speech API documentation](/speech/docs/basics) for + // more + // details. 
+ SampleRateHertz int64 `json:"sampleRateHertz,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AudioEncoding") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AudioEncoding") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2InputAudioConfig) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2InputAudioConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2Intent: Represents an intent. +// Intents convert a number of user expressions or patterns into an +// action. An +// action is an extraction of a user command or sentence semantics. +type GoogleCloudDialogflowV2Intent struct { + // Action: Optional. The name of the action associated with the intent. + Action string `json:"action,omitempty"` + + // DefaultResponsePlatforms: Optional. The list of platforms for which + // the first response will be + // taken from among the messages assigned to the DEFAULT_PLATFORM. + // + // Possible values: + // "PLATFORM_UNSPECIFIED" - Not specified. + // "FACEBOOK" - Facebook. + // "SLACK" - Slack. + // "TELEGRAM" - Telegram. + // "KIK" - Kik. + // "SKYPE" - Skype. + // "LINE" - Line. 
+ // "VIBER" - Viber. + // "ACTIONS_ON_GOOGLE" - Actions on Google. + DefaultResponsePlatforms []string `json:"defaultResponsePlatforms,omitempty"` + + // DisplayName: Required. The name of this intent. + DisplayName string `json:"displayName,omitempty"` + + // Events: Optional. The collection of event names that trigger the + // intent. + // If the collection of input contexts is not empty, all of the contexts + // must + // be present in the active user session for an event to trigger this + // intent. + Events []string `json:"events,omitempty"` + + // FollowupIntentInfo: Optional. Collection of information about all + // followup intents that have + // name of this intent as a root_name. + FollowupIntentInfo []*GoogleCloudDialogflowV2IntentFollowupIntentInfo `json:"followupIntentInfo,omitempty"` + + // InputContextNames: Optional. The list of context names required for + // this intent to be + // triggered. + // Format: `projects//agent/sessions/-/contexts/`. + InputContextNames []string `json:"inputContextNames,omitempty"` + + // IsFallback: Optional. Indicates whether this is a fallback intent. + IsFallback bool `json:"isFallback,omitempty"` + + // Messages: Optional. The collection of rich messages corresponding to + // the + // `Response` field in API.AI console. + Messages []*GoogleCloudDialogflowV2IntentMessage `json:"messages,omitempty"` + + // MlDisabled: Optional. Indicates whether Machine Learning is disabled + // for the intent. + // Note: If `ml_diabled` setting is set to true, then this intent is + // not + // taken into account during inference in `ML ONLY` match mode. + // Also, + // auto-markup in the UI is turned off. + MlDisabled bool `json:"mlDisabled,omitempty"` + + // Name: Required for all methods except `create` (`create` populates + // the name + // automatically. + // The unique identifier of this intent. + // Format: `projects//agent/intents/`. + Name string `json:"name,omitempty"` + + // OutputContexts: Optional. 
The collection of contexts that are + // activated when the intent + // is matched. Context messages in this collection should not set + // the + // parameters field. Setting the `lifespan_count` to 0 will reset the + // context + // when the intent is matched. + // Format: `projects//agent/sessions/-/contexts/`. + OutputContexts []*GoogleCloudDialogflowV2Context `json:"outputContexts,omitempty"` + + // Parameters: Optional. The collection of parameters associated with + // the intent. + Parameters []*GoogleCloudDialogflowV2IntentParameter `json:"parameters,omitempty"` + + // ParentFollowupIntentName: The unique identifier of the parent intent + // in the chain of followup + // intents. + // It identifies the parent followup intent. + // Format: `projects//agent/intents/`. + ParentFollowupIntentName string `json:"parentFollowupIntentName,omitempty"` + + // Priority: Optional. The priority of this intent. Higher numbers + // represent higher + // priorities. Zero or negative numbers mean that the intent is + // disabled. + Priority int64 `json:"priority,omitempty"` + + // ResetContexts: Optional. Indicates whether to delete all contexts in + // the current + // session when this intent is matched. + ResetContexts bool `json:"resetContexts,omitempty"` + + // RootFollowupIntentName: The unique identifier of the root intent in + // the chain of followup intents. + // It identifies the correct followup intents chain for this + // intent. + // Format: `projects//agent/intents/`. + RootFollowupIntentName string `json:"rootFollowupIntentName,omitempty"` + + // TrainingPhrases: Optional. The collection of examples/templates that + // the agent is + // trained on. + TrainingPhrases []*GoogleCloudDialogflowV2IntentTrainingPhrase `json:"trainingPhrases,omitempty"` + + // WebhookState: Required. Indicates whether webhooks are enabled for + // the intent. + // + // Possible values: + // "WEBHOOK_STATE_UNSPECIFIED" - Webhook is disabled in the agent and + // in the intent. 
+ // "WEBHOOK_STATE_ENABLED" - Webhook is enabled in the agent and in + // the intent. + // "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING" - Webhook is enabled in + // the agent and in the intent. Also, each slot + // filling prompt is forwarded to the webhook. + WebhookState string `json:"webhookState,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2Intent) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2Intent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentBatch: This message is a wrapper around +// a collection of intents. +type GoogleCloudDialogflowV2IntentBatch struct { + // Intents: A collection of intents. + Intents []*GoogleCloudDialogflowV2Intent `json:"intents,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Intents") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Intents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentBatch) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentBatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentFollowupIntentInfo: Represents a single +// followup intent in the chain. +type GoogleCloudDialogflowV2IntentFollowupIntentInfo struct { + // FollowupIntentName: The unique identifier of the followup + // intent. + // Format: `projects//agent/intents/`. + FollowupIntentName string `json:"followupIntentName,omitempty"` + + // ParentFollowupIntentName: The unique identifier of the followup + // intent parent. + // Format: `projects//agent/intents/`. + ParentFollowupIntentName string `json:"parentFollowupIntentName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FollowupIntentName") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FollowupIntentName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentFollowupIntentInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentFollowupIntentInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessage: Corresponds to the `Response` +// field in API.AI console. +type GoogleCloudDialogflowV2IntentMessage struct { + // BasicCard: The basic card response for Actions on Google. + BasicCard *GoogleCloudDialogflowV2IntentMessageBasicCard `json:"basicCard,omitempty"` + + // Card: The card response. + Card *GoogleCloudDialogflowV2IntentMessageCard `json:"card,omitempty"` + + // CarouselSelect: The carousel card response for Actions on Google. + CarouselSelect *GoogleCloudDialogflowV2IntentMessageCarouselSelect `json:"carouselSelect,omitempty"` + + // Image: The image response. + Image *GoogleCloudDialogflowV2IntentMessageImage `json:"image,omitempty"` + + // LinkOutSuggestion: The link out suggestion chip for Actions on + // Google. + LinkOutSuggestion *GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion `json:"linkOutSuggestion,omitempty"` + + // ListSelect: The list card response for Actions on Google. + ListSelect *GoogleCloudDialogflowV2IntentMessageListSelect `json:"listSelect,omitempty"` + + // Payload: The response containing a custom payload. 
+ Payload googleapi.RawMessage `json:"payload,omitempty"` + + // Platform: Optional. The platform that this message is intended for. + // + // Possible values: + // "PLATFORM_UNSPECIFIED" - Not specified. + // "FACEBOOK" - Facebook. + // "SLACK" - Slack. + // "TELEGRAM" - Telegram. + // "KIK" - Kik. + // "SKYPE" - Skype. + // "LINE" - Line. + // "VIBER" - Viber. + // "ACTIONS_ON_GOOGLE" - Actions on Google. + Platform string `json:"platform,omitempty"` + + // QuickReplies: The quick replies response. + QuickReplies *GoogleCloudDialogflowV2IntentMessageQuickReplies `json:"quickReplies,omitempty"` + + // SimpleResponses: The voice and text-only responses for Actions on + // Google. + SimpleResponses *GoogleCloudDialogflowV2IntentMessageSimpleResponses `json:"simpleResponses,omitempty"` + + // Suggestions: The suggestion chips for Actions on Google. + Suggestions *GoogleCloudDialogflowV2IntentMessageSuggestions `json:"suggestions,omitempty"` + + // Text: The text response. + Text *GoogleCloudDialogflowV2IntentMessageText `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BasicCard") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BasicCard") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessage) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageBasicCard: The basic card +// message. Useful for displaying information. +type GoogleCloudDialogflowV2IntentMessageBasicCard struct { + // Buttons: Optional. The collection of card buttons. + Buttons []*GoogleCloudDialogflowV2IntentMessageBasicCardButton `json:"buttons,omitempty"` + + // FormattedText: Required, unless image is present. The body text of + // the card. + FormattedText string `json:"formattedText,omitempty"` + + // Image: Optional. The image for the card. + Image *GoogleCloudDialogflowV2IntentMessageImage `json:"image,omitempty"` + + // Subtitle: Optional. The subtitle of the card. + Subtitle string `json:"subtitle,omitempty"` + + // Title: Optional. The title of the card. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Buttons") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Buttons") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageBasicCard) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageBasicCard + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageBasicCardButton: The button +// object that appears at the bottom of a card. +type GoogleCloudDialogflowV2IntentMessageBasicCardButton struct { + // OpenUriAction: Required. Action to take when a user taps on the + // button. + OpenUriAction *GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction `json:"openUriAction,omitempty"` + + // Title: Required. The title of the button. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OpenUriAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OpenUriAction") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageBasicCardButton) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageBasicCardButton + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction: +// Opens the given URI. +type GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction struct { + // Uri: Required. The HTTP or HTTPS scheme URI. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Uri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Uri") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageBasicCardButtonOpenUriAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageCard: The card response message. +type GoogleCloudDialogflowV2IntentMessageCard struct { + // Buttons: Optional. The collection of card buttons. 
+ Buttons []*GoogleCloudDialogflowV2IntentMessageCardButton `json:"buttons,omitempty"` + + // ImageUri: Optional. The public URI to an image file for the card. + ImageUri string `json:"imageUri,omitempty"` + + // Subtitle: Optional. The subtitle of the card. + Subtitle string `json:"subtitle,omitempty"` + + // Title: Optional. The title of the card. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Buttons") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Buttons") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageCard) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageCard + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageCardButton: Optional. Contains +// information about a button. +type GoogleCloudDialogflowV2IntentMessageCardButton struct { + // Postback: Optional. The text to send back to the Dialogflow API or a + // URI to + // open. + Postback string `json:"postback,omitempty"` + + // Text: Optional. The text to show on the button. + Text string `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Postback") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Postback") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageCardButton) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageCardButton + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageCarouselSelect: The card for +// presenting a carousel of options to select from. +type GoogleCloudDialogflowV2IntentMessageCarouselSelect struct { + // Items: Required. Carousel items. + Items []*GoogleCloudDialogflowV2IntentMessageCarouselSelectItem `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageCarouselSelect) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageCarouselSelect + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageCarouselSelectItem: An item in +// the carousel. +type GoogleCloudDialogflowV2IntentMessageCarouselSelectItem struct { + // Description: Optional. The body text of the card. + Description string `json:"description,omitempty"` + + // Image: Optional. The image to display. + Image *GoogleCloudDialogflowV2IntentMessageImage `json:"image,omitempty"` + + // Info: Required. Additional info about the option item. + Info *GoogleCloudDialogflowV2IntentMessageSelectItemInfo `json:"info,omitempty"` + + // Title: Required. Title of the carousel item. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageCarouselSelectItem) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageCarouselSelectItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageImage: The image response +// message. +type GoogleCloudDialogflowV2IntentMessageImage struct { + // AccessibilityText: Optional. A text description of the image to be + // used for accessibility, + // e.g., screen readers. + AccessibilityText string `json:"accessibilityText,omitempty"` + + // ImageUri: Optional. The public URI to an image file. + ImageUri string `json:"imageUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessibilityText") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessibilityText") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageImage) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageImage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion: The suggestion +// chip message that allows the user to jump out to the app +// or website associated with this agent. +type GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion struct { + // DestinationName: Required. The name of the app or site this chip is + // linking to. + DestinationName string `json:"destinationName,omitempty"` + + // Uri: Required. The URI of the app or site to open when the user taps + // the + // suggestion chip. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageLinkOutSuggestion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageListSelect: The card for +// presenting a list of options to select from. +type GoogleCloudDialogflowV2IntentMessageListSelect struct { + // Items: Required. List items. + Items []*GoogleCloudDialogflowV2IntentMessageListSelectItem `json:"items,omitempty"` + + // Title: Optional. The overall title of the list. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageListSelect) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageListSelect + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageListSelectItem: An item in the +// list. 
+type GoogleCloudDialogflowV2IntentMessageListSelectItem struct { + // Description: Optional. The main text describing the item. + Description string `json:"description,omitempty"` + + // Image: Optional. The image to display. + Image *GoogleCloudDialogflowV2IntentMessageImage `json:"image,omitempty"` + + // Info: Required. Additional information about this option. + Info *GoogleCloudDialogflowV2IntentMessageSelectItemInfo `json:"info,omitempty"` + + // Title: Required. The title of the list item. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageListSelectItem) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageListSelectItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageQuickReplies: The quick replies +// response message. +type GoogleCloudDialogflowV2IntentMessageQuickReplies struct { + // QuickReplies: Optional. The collection of quick replies. 
+ QuickReplies []string `json:"quickReplies,omitempty"` + + // Title: Optional. The title of the collection of quick replies. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuickReplies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuickReplies") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageQuickReplies) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageQuickReplies + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageSelectItemInfo: Additional info +// about the select item for when it is triggered in a +// dialog. +type GoogleCloudDialogflowV2IntentMessageSelectItemInfo struct { + // Key: Required. A unique key that will be sent back to the agent if + // this + // response is given. + Key string `json:"key,omitempty"` + + // Synonyms: Optional. A list of synonyms that can also be used to + // trigger this + // item in dialog. + Synonyms []string `json:"synonyms,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageSelectItemInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageSelectItemInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageSimpleResponse: The simple +// response message containing speech or text. +type GoogleCloudDialogflowV2IntentMessageSimpleResponse struct { + // DisplayText: Optional. The text to display. + DisplayText string `json:"displayText,omitempty"` + + // Ssml: One of text_to_speech or ssml must be provided. Structured + // spoken + // response to the user in the SSML format. Mutually exclusive + // with + // text_to_speech. + Ssml string `json:"ssml,omitempty"` + + // TextToSpeech: One of text_to_speech or ssml must be provided. The + // plain text of the + // speech output. Mutually exclusive with ssml. + TextToSpeech string `json:"textToSpeech,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayText") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayText") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageSimpleResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageSimpleResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageSimpleResponses: The collection +// of simple response candidates. +// This message in `QueryResult.fulfillment_messages` +// and +// `WebhookResponse.fulfillment_messages` should contain only +// one +// `SimpleResponse`. +type GoogleCloudDialogflowV2IntentMessageSimpleResponses struct { + // SimpleResponses: Required. The list of simple responses. + SimpleResponses []*GoogleCloudDialogflowV2IntentMessageSimpleResponse `json:"simpleResponses,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SimpleResponses") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"SimpleResponses") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageSimpleResponses) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageSimpleResponses + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageSuggestion: The suggestion chip +// message that the user can tap to quickly post a reply +// to the conversation. +type GoogleCloudDialogflowV2IntentMessageSuggestion struct { + // Title: Required. The text shown the in the suggestion chip. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Title") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Title") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageSuggestion) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageSuggestion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageSuggestions: The collection of +// suggestions. +type GoogleCloudDialogflowV2IntentMessageSuggestions struct { + // Suggestions: Required. The list of suggested replies. + Suggestions []*GoogleCloudDialogflowV2IntentMessageSuggestion `json:"suggestions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Suggestions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Suggestions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageSuggestions) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageSuggestions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentMessageText: The text response message. +type GoogleCloudDialogflowV2IntentMessageText struct { + // Text: Optional. The collection of the agent's responses. 
+ Text []string `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Text") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Text") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentMessageText) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentMessageText + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentParameter: Represents intent parameters. +type GoogleCloudDialogflowV2IntentParameter struct { + // DefaultValue: Optional. The default value to use when the `value` + // yields an empty + // result. + // Default values can be extracted from contexts by using the + // following + // syntax: `#context_name.parameter_name`. + DefaultValue string `json:"defaultValue,omitempty"` + + // DisplayName: Required. The name of the parameter. + DisplayName string `json:"displayName,omitempty"` + + // EntityTypeDisplayName: Optional. The name of the entity type, + // prefixed with `@`, that + // describes values of the parameter. If the parameter is + // required, this must be provided. + EntityTypeDisplayName string `json:"entityTypeDisplayName,omitempty"` + + // IsList: Optional. 
Indicates whether the parameter represents a list + // of values. + IsList bool `json:"isList,omitempty"` + + // Mandatory: Optional. Indicates whether the parameter is required. + // That is, + // whether the intent cannot be completed without collecting the + // parameter + // value. + Mandatory bool `json:"mandatory,omitempty"` + + // Name: The unique identifier of this parameter. + Name string `json:"name,omitempty"` + + // Prompts: Optional. The collection of prompts that the agent can + // present to the + // user in order to collect value for the parameter. + Prompts []string `json:"prompts,omitempty"` + + // Value: Optional. The definition of the parameter value. It can be: + // - a constant string, + // - a parameter value defined as `$parameter_name`, + // - an original parameter value defined as + // `$parameter_name.original`, + // - a parameter value from some context defined as + // `#context_name.parameter_name`. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultValue") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentParameter) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentParameter + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentTrainingPhrase: Represents an example or +// template that the agent is trained on. +type GoogleCloudDialogflowV2IntentTrainingPhrase struct { + // Name: Required. The unique identifier of this training phrase. + Name string `json:"name,omitempty"` + + // Parts: Required. The collection of training phrase parts (can be + // annotated). + // Fields: `entity_type`, `alias` and `user_defined` should be + // populated + // only for the annotated parts of the training phrase. + Parts []*GoogleCloudDialogflowV2IntentTrainingPhrasePart `json:"parts,omitempty"` + + // TimesAddedCount: Optional. Indicates how many times this example or + // template was added to + // the intent. Each time a developer adds an existing sample by editing + // an + // intent or training, this counter is increased. + TimesAddedCount int64 `json:"timesAddedCount,omitempty"` + + // Type: Required. The type of the training phrase. + // + // Possible values: + // "TYPE_UNSPECIFIED" - Not specified. This value should never be + // used. + // "EXAMPLE" - Examples do not contain @-prefixed entity type names, + // but example parts + // can be annotated with entity types. + // "TEMPLATE" - Templates are not annotated with entity types, but + // they can contain + // @-prefixed entity type names as substrings. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentTrainingPhrase) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentTrainingPhrase + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2IntentTrainingPhrasePart: Represents a part of +// a training phrase. +type GoogleCloudDialogflowV2IntentTrainingPhrasePart struct { + // Alias: Optional. The parameter name for the value extracted from + // the + // annotated part of the example. + Alias string `json:"alias,omitempty"` + + // EntityType: Optional. The entity type name prefixed with `@`. This + // field is + // required for the annotated part of the text and applies only + // to + // examples. + EntityType string `json:"entityType,omitempty"` + + // Text: Required. The text corresponding to the example or template, + // if there are no annotations. For + // annotated examples, it is the text for one of the example's parts. + Text string `json:"text,omitempty"` + + // UserDefined: Optional. Indicates whether the text was manually + // annotated by the + // developer. + UserDefined bool `json:"userDefined,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Alias") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Alias") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2IntentTrainingPhrasePart) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2IntentTrainingPhrasePart + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ListContextsResponse: The response message for +// Contexts.ListContexts. +type GoogleCloudDialogflowV2ListContextsResponse struct { + // Contexts: The list of contexts. There will be a maximum number of + // items + // returned based on the page_size field in the request. + Contexts []*GoogleCloudDialogflowV2Context `json:"contexts,omitempty"` + + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no + // more results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Contexts") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Contexts") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ListContextsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ListContextsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ListEntityTypesResponse: The response message +// for EntityTypes.ListEntityTypes. +type GoogleCloudDialogflowV2ListEntityTypesResponse struct { + // EntityTypes: The list of agent entity types. There will be a maximum + // number of items + // returned based on the page_size field in the request. + EntityTypes []*GoogleCloudDialogflowV2EntityType `json:"entityTypes,omitempty"` + + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no + // more results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "EntityTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ListEntityTypesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ListEntityTypesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ListIntentsResponse: The response message for +// Intents.ListIntents. +type GoogleCloudDialogflowV2ListIntentsResponse struct { + // Intents: The list of agent intents. There will be a maximum number of + // items + // returned based on the page_size field in the request. + Intents []*GoogleCloudDialogflowV2Intent `json:"intents,omitempty"` + + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no + // more results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Intents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Intents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ListIntentsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ListIntentsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2ListSessionEntityTypesResponse: The response +// message for SessionEntityTypes.ListSessionEntityTypes. +type GoogleCloudDialogflowV2ListSessionEntityTypesResponse struct { + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no + // more results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SessionEntityTypes: The list of session entity types. There will be a + // maximum number of items + // returned based on the page_size field in the request. + SessionEntityTypes []*GoogleCloudDialogflowV2SessionEntityType `json:"sessionEntityTypes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2ListSessionEntityTypesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2ListSessionEntityTypesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2OriginalDetectIntentRequest: Represents the +// contents of the original request that was passed to +// the `[Streaming]DetectIntent` call. +type GoogleCloudDialogflowV2OriginalDetectIntentRequest struct { + // Payload: Optional. This field is set to the value of + // `QueryParameters.payload` field + // passed in the request. + Payload googleapi.RawMessage `json:"payload,omitempty"` + + // Source: The source of this request, e.g., `google`, `facebook`, + // `slack`. It is set + // by Dialogflow-owned servers. Possible values of this field correspond + // to + // Intent.Message.Platform. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Payload") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Payload") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2OriginalDetectIntentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2OriginalDetectIntentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2QueryInput: Represents the query input. It can +// contain either: +// +// 1. An audio config which +// instructs the speech recognizer how to process the speech +// audio. +// +// 2. A conversational query in the form of text,. +// +// 3. An event that specifies which intent to trigger. +type GoogleCloudDialogflowV2QueryInput struct { + // AudioConfig: Instructs the speech recognizer how to process the + // speech audio. + AudioConfig *GoogleCloudDialogflowV2InputAudioConfig `json:"audioConfig,omitempty"` + + // Event: The event to be processed. + Event *GoogleCloudDialogflowV2EventInput `json:"event,omitempty"` + + // Text: The natural language text to be processed. + Text *GoogleCloudDialogflowV2TextInput `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AudioConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AudioConfig") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2QueryInput) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2QueryInput + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2QueryParameters: Represents the parameters of +// the conversational query. +type GoogleCloudDialogflowV2QueryParameters struct { + // Contexts: Optional. The collection of contexts to be activated before + // this query is + // executed. + Contexts []*GoogleCloudDialogflowV2Context `json:"contexts,omitempty"` + + // GeoLocation: Optional. The geo location of this conversational query. + GeoLocation *GoogleTypeLatLng `json:"geoLocation,omitempty"` + + // Payload: Optional. This field can be used to pass custom data into + // the webhook + // associated with the agent. Arbitrary JSON objects are supported. + Payload googleapi.RawMessage `json:"payload,omitempty"` + + // ResetContexts: Optional. Specifies whether to delete all contexts in + // the current session + // before the new ones are activated. + ResetContexts bool `json:"resetContexts,omitempty"` + + // SessionEntityTypes: Optional. 
The collection of session entity types + // to replace or extend + // developer entities with for this query only. The entity synonyms + // apply + // to all languages. + SessionEntityTypes []*GoogleCloudDialogflowV2SessionEntityType `json:"sessionEntityTypes,omitempty"` + + // TimeZone: Optional. The time zone of this conversational query from + // the + // [time zone database](https://www.iana.org/time-zones), + // e.g., + // America/New_York, Europe/Paris. If not provided, the time zone + // specified in + // agent settings is used. + TimeZone string `json:"timeZone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Contexts") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Contexts") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2QueryParameters) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2QueryParameters + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2QueryResult: Represents the result of +// conversational query or event processing. +type GoogleCloudDialogflowV2QueryResult struct { + // Action: The action name from the matched intent. 
+ Action string `json:"action,omitempty"` + + // AllRequiredParamsPresent: This field is set to: + // - `false` if the matched intent has required parameters and not all + // of + // the required parameter values have been collected. + // - `true` if all required parameter values have been collected, or if + // the + // matched intent doesn't contain any required parameters. + AllRequiredParamsPresent bool `json:"allRequiredParamsPresent,omitempty"` + + // DiagnosticInfo: The free-form diagnostic info. For example, this + // field + // could contain webhook call latency. + DiagnosticInfo googleapi.RawMessage `json:"diagnosticInfo,omitempty"` + + // FulfillmentMessages: The collection of rich messages to present to + // the user. + FulfillmentMessages []*GoogleCloudDialogflowV2IntentMessage `json:"fulfillmentMessages,omitempty"` + + // FulfillmentText: The text to be pronounced to the user or shown on + // the screen. + FulfillmentText string `json:"fulfillmentText,omitempty"` + + // Intent: The intent that matched the conversational query. Some, + // not + // all fields are filled in this message, including but not limited + // to: + // `name`, `display_name` and `webhook_state`. + Intent *GoogleCloudDialogflowV2Intent `json:"intent,omitempty"` + + // IntentDetectionConfidence: The intent detection confidence. Values + // range from 0.0 + // (completely uncertain) to 1.0 (completely certain). + IntentDetectionConfidence float64 `json:"intentDetectionConfidence,omitempty"` + + // LanguageCode: The language that was triggered during intent + // detection. + // See [Language + // Support](https://dialogflow.com/docs/reference/language) + // for a list of the currently supported language codes. + LanguageCode string `json:"languageCode,omitempty"` + + // OutputContexts: The collection of output contexts. If + // applicable, + // `output_contexts.parameters` contains entries with name + // `.original` containing the original parameter + // values + // before the query. 
+ OutputContexts []*GoogleCloudDialogflowV2Context `json:"outputContexts,omitempty"` + + // Parameters: The collection of extracted parameters. + Parameters googleapi.RawMessage `json:"parameters,omitempty"` + + // QueryText: The original conversational query text: + // - If natural language text was provided as input, `query_text` + // contains + // a copy of the input. + // - If natural language speech audio was provided as input, + // `query_text` + // contains the speech recognition result. If speech recognizer + // produced + // multiple alternatives, a particular one is picked. + // - If an event was provided as input, `query_text` is not set. + QueryText string `json:"queryText,omitempty"` + + // SpeechRecognitionConfidence: The Speech recognition confidence + // between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words + // are + // correct. The default of 0.0 is a sentinel value indicating that + // confidence + // was not set. + // + // You should not rely on this field as it isn't guaranteed to be + // accurate, or + // even set. In particular this field isn't set in Webhook calls and + // for + // StreamingDetectIntent since the streaming endpoint has separate + // confidence + // estimates per portion of the audio in StreamingRecognitionResult. + SpeechRecognitionConfidence float64 `json:"speechRecognitionConfidence,omitempty"` + + // WebhookPayload: If the query was fulfilled by a webhook call, this + // field is set to the + // value of the `payload` field returned in the webhook response. + WebhookPayload googleapi.RawMessage `json:"webhookPayload,omitempty"` + + // WebhookSource: If the query was fulfilled by a webhook call, this + // field is set to the + // value of the `source` field returned in the webhook response. + WebhookSource string `json:"webhookSource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2QueryResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2QueryResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2QueryResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2QueryResult + var s1 struct { + IntentDetectionConfidence gensupport.JSONFloat64 `json:"intentDetectionConfidence"` + SpeechRecognitionConfidence gensupport.JSONFloat64 `json:"speechRecognitionConfidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.IntentDetectionConfidence = float64(s1.IntentDetectionConfidence) + s.SpeechRecognitionConfidence = float64(s1.SpeechRecognitionConfidence) + return nil +} + +// GoogleCloudDialogflowV2RestoreAgentRequest: The request message for +// Agents.RestoreAgent. +type GoogleCloudDialogflowV2RestoreAgentRequest struct { + // AgentContent: The agent to restore. 
+ // + // Example for how to restore an agent via the command line: + // + // curl \ + // + // 'https://dialogflow.googleapis.com/v2/projects//agent:re + // store\ + // -X POST \ + // -H 'Authorization: Bearer '$(gcloud auth print-access-token) \ + // -H 'Accept: application/json' \ + // -H 'Content-Type: application/json' \ + // --compressed \ + // --data-binary "{ + // 'agentContent': '$(cat | base64 -w 0)' + // }" \ + AgentContent string `json:"agentContent,omitempty"` + + // AgentUri: The URI to a Google Cloud Storage file containing the agent + // to restore. + // Note: The URI must start with "gs://". + AgentUri string `json:"agentUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AgentContent") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AgentContent") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2RestoreAgentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2RestoreAgentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2SearchAgentsResponse: The response message for +// Agents.SearchAgents. 
+type GoogleCloudDialogflowV2SearchAgentsResponse struct { + // Agents: The list of agents. There will be a maximum number of items + // returned based + // on the page_size field in the request. + Agents []*GoogleCloudDialogflowV2Agent `json:"agents,omitempty"` + + // NextPageToken: Token to retrieve the next page of results, or empty + // if there are no + // more results in the list. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Agents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Agents") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2SearchAgentsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2SearchAgentsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2SessionEntityType: Represents a session entity +// type. +// +// Extends or replaces a developer entity type at the user session level +// (we +// refer to the entity types defined at the agent level as "developer +// entity +// types"). 
+// +// Note: session entity types apply to all queries, regardless of the +// language. +type GoogleCloudDialogflowV2SessionEntityType struct { + // Entities: Required. The collection of entities associated with this + // session entity + // type. + Entities []*GoogleCloudDialogflowV2EntityTypeEntity `json:"entities,omitempty"` + + // EntityOverrideMode: Required. Indicates whether the additional data + // should override or + // supplement the developer entity type definition. + // + // Possible values: + // "ENTITY_OVERRIDE_MODE_UNSPECIFIED" - Not specified. This value + // should be never used. + // "ENTITY_OVERRIDE_MODE_OVERRIDE" - The collection of session + // entities overrides the collection of entities + // in the corresponding developer entity type. + // "ENTITY_OVERRIDE_MODE_SUPPLEMENT" - The collection of session + // entities extends the collection of entities in + // the corresponding developer entity type. + // Calls to `ListSessionEntityTypes`, + // `GetSessionEntityType`, + // `CreateSessionEntityType` and `UpdateSessionEntityType` return the + // full + // collection of entities from the developer entity type in the + // agent's + // default language and the session entity type. + EntityOverrideMode string `json:"entityOverrideMode,omitempty"` + + // Name: Required. The unique identifier of this session entity type. + // Format: + // `projects//agent/sessions//entityTypes/`, or + // `projects//agent/runtimes/sessions//entityTypes/`. + // Note: Runtimes are under construction and will be available soon. + // If is not specified, we assume default 'sandbox' + // runtime. + Name string `json:"name,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Entities") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entities") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2SessionEntityType) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2SessionEntityType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2TextInput: Represents the natural language +// text to be processed. +type GoogleCloudDialogflowV2TextInput struct { + // LanguageCode: Required. The language of this conversational query. + // See [Language + // Support](https://dialogflow.com/docs/languages) for a list of + // the + // currently supported language codes. Note that queries in the same + // session + // do not necessarily need to specify the same language. + LanguageCode string `json:"languageCode,omitempty"` + + // Text: Required. The UTF-8 encoded natural language text to be + // processed. + // Text length must not exceed 256 bytes. + Text string `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LanguageCode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LanguageCode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2TextInput) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2TextInput + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2TrainAgentRequest: The request message for +// Agents.TrainAgent. +type GoogleCloudDialogflowV2TrainAgentRequest struct { +} + +// GoogleCloudDialogflowV2WebhookRequest: The request message for a +// webhook call. +type GoogleCloudDialogflowV2WebhookRequest struct { + // OriginalDetectIntentRequest: Optional. The contents of the original + // request that was passed to + // `[Streaming]DetectIntent` call. + OriginalDetectIntentRequest *GoogleCloudDialogflowV2OriginalDetectIntentRequest `json:"originalDetectIntentRequest,omitempty"` + + // QueryResult: The result of the conversational query or event + // processing. Contains the + // same value as `[Streaming]DetectIntentResponse.query_result`. + QueryResult *GoogleCloudDialogflowV2QueryResult `json:"queryResult,omitempty"` + + // ResponseId: The unique identifier of the response. Contains the same + // value as + // `[Streaming]DetectIntentResponse.response_id`. + ResponseId string `json:"responseId,omitempty"` + + // Session: The unique identifier of detectIntent request session. + // Can be used to identify end-user inside webhook + // implementation. + // Format: `projects//agent/sessions/`. 
+ Session string `json:"session,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "OriginalDetectIntentRequest") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "OriginalDetectIntentRequest") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2WebhookRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2WebhookRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2WebhookResponse: The response message for a +// webhook call. +type GoogleCloudDialogflowV2WebhookResponse struct { + // FollowupEventInput: Optional. Makes the platform immediately invoke + // another `DetectIntent` call + // internally with the specified event as input. + FollowupEventInput *GoogleCloudDialogflowV2EventInput `json:"followupEventInput,omitempty"` + + // FulfillmentMessages: Optional. The collection of rich messages to + // present to the user. This + // value is passed directly to `QueryResult.fulfillment_messages`. + FulfillmentMessages []*GoogleCloudDialogflowV2IntentMessage `json:"fulfillmentMessages,omitempty"` + + // FulfillmentText: Optional. The text to be shown on the screen. 
This + // value is passed directly + // to `QueryResult.fulfillment_text`. + FulfillmentText string `json:"fulfillmentText,omitempty"` + + // OutputContexts: Optional. The collection of output contexts. This + // value is passed directly + // to `QueryResult.output_contexts`. + OutputContexts []*GoogleCloudDialogflowV2Context `json:"outputContexts,omitempty"` + + // Payload: Optional. This value is passed directly to + // `QueryResult.webhook_payload`. + Payload googleapi.RawMessage `json:"payload,omitempty"` + + // Source: Optional. This value is passed directly to + // `QueryResult.webhook_source`. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FollowupEventInput") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FollowupEventInput") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2WebhookResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2WebhookResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1BatchUpdateEntityTypesResponse: The +// response message for EntityTypes.BatchUpdateEntityTypes. 
+type GoogleCloudDialogflowV2beta1BatchUpdateEntityTypesResponse struct { + // EntityTypes: The collection of updated or created entity types. + EntityTypes []*GoogleCloudDialogflowV2beta1EntityType `json:"entityTypes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityTypes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1BatchUpdateEntityTypesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1BatchUpdateEntityTypesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1BatchUpdateIntentsResponse: The response +// message for Intents.BatchUpdateIntents. +type GoogleCloudDialogflowV2beta1BatchUpdateIntentsResponse struct { + // Intents: The collection of updated or created intents. + Intents []*GoogleCloudDialogflowV2beta1Intent `json:"intents,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Intents") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Intents") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1BatchUpdateIntentsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1BatchUpdateIntentsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1Context: Represents a context. +type GoogleCloudDialogflowV2beta1Context struct { + // LifespanCount: Optional. The number of conversational query requests + // after which the + // context expires. If set to `0` (the default) the context + // expires + // immediately. Contexts expire automatically after 10 minutes even if + // there + // are no matching queries. + LifespanCount int64 `json:"lifespanCount,omitempty"` + + // Name: Required. The unique identifier of the context. + // Format: + // `projects//agent/sessions//contexts/`, + // or + // `projects//agent/runtimes//sessions//contexts/`. + // Note: Runtimes are under construction and will be available soon. + // The Context ID is always converted to lowercase. + // If is not specified, we assume default 'sandbox' + // runtime. + Name string `json:"name,omitempty"` + + // Parameters: Optional. The collection of parameters associated with + // this context. 
+ // Refer to [this + // doc](https://dialogflow.com/docs/actions-and-parameters) for + // syntax. + Parameters googleapi.RawMessage `json:"parameters,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LifespanCount") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LifespanCount") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1Context) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1Context + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1EntityType: Represents an entity +// type. +// Entity types serve as a tool for extracting parameter values from +// natural +// language queries. +type GoogleCloudDialogflowV2beta1EntityType struct { + // AutoExpansionMode: Optional. Indicates whether the entity type can be + // automatically + // expanded. + // + // Possible values: + // "AUTO_EXPANSION_MODE_UNSPECIFIED" - Auto expansion disabled for the + // entity. + // "AUTO_EXPANSION_MODE_DEFAULT" - Allows an agent to recognize values + // that have not been explicitly + // listed in the entity. 
+ AutoExpansionMode string `json:"autoExpansionMode,omitempty"` + + // DisplayName: Required. The name of the entity. + DisplayName string `json:"displayName,omitempty"` + + // Entities: Optional. The collection of entities associated with the + // entity type. + Entities []*GoogleCloudDialogflowV2beta1EntityTypeEntity `json:"entities,omitempty"` + + // Kind: Required. Indicates the kind of entity type. + // + // Possible values: + // "KIND_UNSPECIFIED" - Not specified. This value should be never + // used. + // "KIND_MAP" - Map entity types allow mapping of a group of synonyms + // to a canonical + // value. + // "KIND_LIST" - List entity types contain a set of entries that do + // not map to canonical + // values. However, list entity types can contain references to other + // entity + // types (with or without aliases). + Kind string `json:"kind,omitempty"` + + // Name: Required for all methods except `create` (`create` populates + // the name + // automatically. + // The unique identifier of the entity type. Format: + // `projects//agent/entityTypes/`. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoExpansionMode") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoExpansionMode") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. 
This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1EntityType) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1EntityType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1EntityTypeEntity: Optional. Represents an +// entity. +type GoogleCloudDialogflowV2beta1EntityTypeEntity struct { + // Synonyms: Required. A collection of synonyms. For `KIND_LIST` entity + // types this + // must contain exactly one synonym equal to `value`. + Synonyms []string `json:"synonyms,omitempty"` + + // Value: Required. + // For `KIND_MAP` entity types: + // A canonical name to be used in place of synonyms. + // For `KIND_LIST` entity types: + // A string that can contain references to other entity types (with + // or + // without aliases). + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Synonyms") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Synonyms") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1EntityTypeEntity) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1EntityTypeEntity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1EventInput: Events allow for matching +// intents by event name instead of the natural +// language input. For instance, input `` can trigger a personalized +// welcome response. +// The parameter `name` may be used by the agent in the +// response: +// `“Hello #welcome_event.name! What can I do for you today?”`. +type GoogleCloudDialogflowV2beta1EventInput struct { + // LanguageCode: Required. The language of this query. See + // [Language + // Support](https://dialogflow.com/docs/languages) for a list of + // the + // currently supported language codes. Note that queries in the same + // session + // do not necessarily need to specify the same language. + LanguageCode string `json:"languageCode,omitempty"` + + // Name: Required. The unique identifier of the event. + Name string `json:"name,omitempty"` + + // Parameters: Optional. The collection of parameters associated with + // the event. + Parameters googleapi.RawMessage `json:"parameters,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LanguageCode") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LanguageCode") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1EventInput) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1EventInput + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1ExportAgentResponse: The response message +// for Agents.ExportAgent. +type GoogleCloudDialogflowV2beta1ExportAgentResponse struct { + // AgentContent: The exported agent. + // + // Example for how to export an agent to a zip file via a command + // line: + // + // curl \ + // + // 'https://dialogflow.googleapis.com/v2beta1/projects//age + // nt:export'\ + // -X POST \ + // -H 'Authorization: Bearer '$(gcloud auth print-access-token) \ + // -H 'Accept: application/json' \ + // -H 'Content-Type: application/json' \ + // --compressed \ + // --data-binary '{}' \ + // | grep agentContent | sed -e 's/.*"agentContent": "\([^"]*\)".*/\1/' + // \ + // | base64 --decode > + AgentContent string `json:"agentContent,omitempty"` + + // AgentUri: The URI to a file containing the exported agent. This field + // is populated + // only if `agent_uri` is specified in `ExportAgentRequest`. + AgentUri string `json:"agentUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AgentContent") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"AgentContent") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1ExportAgentResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1ExportAgentResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1Intent: Represents an intent. +// Intents convert a number of user expressions or patterns into an +// action. An +// action is an extraction of a user command or sentence semantics. +type GoogleCloudDialogflowV2beta1Intent struct { + // Action: Optional. The name of the action associated with the intent. + Action string `json:"action,omitempty"` + + // DefaultResponsePlatforms: Optional. The list of platforms for which + // the first response will be + // taken from among the messages assigned to the DEFAULT_PLATFORM. + // + // Possible values: + // "PLATFORM_UNSPECIFIED" - Not specified. + // "FACEBOOK" - Facebook. + // "SLACK" - Slack. + // "TELEGRAM" - Telegram. + // "KIK" - Kik. + // "SKYPE" - Skype. + // "LINE" - Line. + // "VIBER" - Viber. + // "ACTIONS_ON_GOOGLE" - Actions on Google. + DefaultResponsePlatforms []string `json:"defaultResponsePlatforms,omitempty"` + + // DisplayName: Required. The name of this intent. + DisplayName string `json:"displayName,omitempty"` + + // Events: Optional. The collection of event names that trigger the + // intent. + // If the collection of input contexts is not empty, all of the contexts + // must + // be present in the active user session for an event to trigger this + // intent. 
+ Events []string `json:"events,omitempty"` + + // FollowupIntentInfo: Optional. Collection of information about all + // followup intents that have + // name of this intent as a root_name. + FollowupIntentInfo []*GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo `json:"followupIntentInfo,omitempty"` + + // InputContextNames: Optional. The list of context names required for + // this intent to be + // triggered. + // Format: `projects//agent/sessions/-/contexts/`. + InputContextNames []string `json:"inputContextNames,omitempty"` + + // IsFallback: Optional. Indicates whether this is a fallback intent. + IsFallback bool `json:"isFallback,omitempty"` + + // Messages: Optional. The collection of rich messages corresponding to + // the + // `Response` field in API.AI console. + Messages []*GoogleCloudDialogflowV2beta1IntentMessage `json:"messages,omitempty"` + + // MlDisabled: Optional. Indicates whether Machine Learning is disabled + // for the intent. + // Note: If `ml_disabled` setting is set to true, then this intent is + // not + // taken into account during inference in `ML ONLY` match mode. + // Also, + // auto-markup in the UI is turned off. + MlDisabled bool `json:"mlDisabled,omitempty"` + + // MlEnabled: Optional. Indicates whether Machine Learning is enabled + // for the intent. + // Note: If `ml_enabled` setting is set to false, then this intent is + // not + // taken into account during inference in `ML ONLY` match mode. + // Also, + // auto-markup in the UI is turned off. + // DEPRECATED! Please use `ml_disabled` field instead. + // NOTE: If neither `ml_enabled` nor `ml_disabled` field is set, then + // the + // default value is determined as follows: + // - Before April 15th, 2018 the default is: + // ml_enabled = false / ml_disabled = true. + // - After April 15th, 2018 the default is: + // ml_enabled = true / ml_disabled = false. 
+ MlEnabled bool `json:"mlEnabled,omitempty"` + + // Name: Required for all methods except `create` (`create` populates + // the name + // automatically. + // The unique identifier of this intent. + // Format: `projects//agent/intents/`. + Name string `json:"name,omitempty"` + + // OutputContexts: Optional. The collection of contexts that are + // activated when the intent + // is matched. Context messages in this collection should not set + // the + // parameters field. Setting the `lifespan_count` to 0 will reset the + // context + // when the intent is matched. + // Format: `projects//agent/sessions/-/contexts/`. + OutputContexts []*GoogleCloudDialogflowV2beta1Context `json:"outputContexts,omitempty"` + + // Parameters: Optional. The collection of parameters associated with + // the intent. + Parameters []*GoogleCloudDialogflowV2beta1IntentParameter `json:"parameters,omitempty"` + + // ParentFollowupIntentName: The unique identifier of the parent intent + // in the chain of followup + // intents. + // It identifies the parent followup intent. + // Format: `projects//agent/intents/`. + ParentFollowupIntentName string `json:"parentFollowupIntentName,omitempty"` + + // Priority: Optional. The priority of this intent. Higher numbers + // represent higher + // priorities. Zero or negative numbers mean that the intent is + // disabled. + Priority int64 `json:"priority,omitempty"` + + // ResetContexts: Optional. Indicates whether to delete all contexts in + // the current + // session when this intent is matched. + ResetContexts bool `json:"resetContexts,omitempty"` + + // RootFollowupIntentName: The unique identifier of the root intent in + // the chain of followup intents. + // It identifies the correct followup intents chain for this + // intent. + // Format: `projects//agent/intents/`. + RootFollowupIntentName string `json:"rootFollowupIntentName,omitempty"` + + // TrainingPhrases: Optional. 
The collection of examples/templates that + // the agent is + // trained on. + TrainingPhrases []*GoogleCloudDialogflowV2beta1IntentTrainingPhrase `json:"trainingPhrases,omitempty"` + + // WebhookState: Required. Indicates whether webhooks are enabled for + // the intent. + // + // Possible values: + // "WEBHOOK_STATE_UNSPECIFIED" - Webhook is disabled in the agent and + // in the intent. + // "WEBHOOK_STATE_ENABLED" - Webhook is enabled in the agent and in + // the intent. + // "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING" - Webhook is enabled in + // the agent and in the intent. Also, each slot + // filling prompt is forwarded to the webhook. + WebhookState string `json:"webhookState,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1Intent) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1Intent + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo: Represents a +// single followup intent in the chain. 
+type GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo struct { + // FollowupIntentName: The unique identifier of the followup + // intent. + // Format: `projects//agent/intents/`. + FollowupIntentName string `json:"followupIntentName,omitempty"` + + // ParentFollowupIntentName: The unique identifier of the followup + // intent parent. + // Format: `projects//agent/intents/`. + ParentFollowupIntentName string `json:"parentFollowupIntentName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FollowupIntentName") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FollowupIntentName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentFollowupIntentInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessage: Corresponds to the +// `Response` field in API.AI console. +type GoogleCloudDialogflowV2beta1IntentMessage struct { + // BasicCard: Displays a basic card for Actions on Google. 
+ BasicCard *GoogleCloudDialogflowV2beta1IntentMessageBasicCard `json:"basicCard,omitempty"` + + // Card: Displays a card. + Card *GoogleCloudDialogflowV2beta1IntentMessageCard `json:"card,omitempty"` + + // CarouselSelect: Displays a carousel card for Actions on Google. + CarouselSelect *GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect `json:"carouselSelect,omitempty"` + + // Image: Displays an image. + Image *GoogleCloudDialogflowV2beta1IntentMessageImage `json:"image,omitempty"` + + // LinkOutSuggestion: Displays a link out suggestion chip for Actions on + // Google. + LinkOutSuggestion *GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion `json:"linkOutSuggestion,omitempty"` + + // ListSelect: Displays a list card for Actions on Google. + ListSelect *GoogleCloudDialogflowV2beta1IntentMessageListSelect `json:"listSelect,omitempty"` + + // Payload: Returns a response containing a custom payload. + Payload googleapi.RawMessage `json:"payload,omitempty"` + + // Platform: Optional. The platform that this message is intended for. + // + // Possible values: + // "PLATFORM_UNSPECIFIED" - Not specified. + // "FACEBOOK" - Facebook. + // "SLACK" - Slack. + // "TELEGRAM" - Telegram. + // "KIK" - Kik. + // "SKYPE" - Skype. + // "LINE" - Line. + // "VIBER" - Viber. + // "ACTIONS_ON_GOOGLE" - Actions on Google. + Platform string `json:"platform,omitempty"` + + // QuickReplies: Displays quick replies. + QuickReplies *GoogleCloudDialogflowV2beta1IntentMessageQuickReplies `json:"quickReplies,omitempty"` + + // SimpleResponses: Returns a voice or text-only response for Actions on + // Google. + SimpleResponses *GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses `json:"simpleResponses,omitempty"` + + // Suggestions: Displays suggestion chips for Actions on Google. + Suggestions *GoogleCloudDialogflowV2beta1IntentMessageSuggestions `json:"suggestions,omitempty"` + + // Text: Returns a text response. 
+ Text *GoogleCloudDialogflowV2beta1IntentMessageText `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BasicCard") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BasicCard") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessage) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageBasicCard: The basic card +// message. Useful for displaying information. +type GoogleCloudDialogflowV2beta1IntentMessageBasicCard struct { + // Buttons: Optional. The collection of card buttons. + Buttons []*GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton `json:"buttons,omitempty"` + + // FormattedText: Required, unless image is present. The body text of + // the card. + FormattedText string `json:"formattedText,omitempty"` + + // Image: Optional. The image for the card. + Image *GoogleCloudDialogflowV2beta1IntentMessageImage `json:"image,omitempty"` + + // Subtitle: Optional. The subtitle of the card. + Subtitle string `json:"subtitle,omitempty"` + + // Title: Optional. The title of the card. 
+ Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Buttons") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Buttons") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageBasicCard) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageBasicCard + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton: The button +// object that appears at the bottom of a card. +type GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton struct { + // OpenUriAction: Required. Action to take when a user taps on the + // button. + OpenUriAction *GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction `json:"openUriAction,omitempty"` + + // Title: Required. The title of the button. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OpenUriAction") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OpenUriAction") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageBasicCardButton + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction: +// Opens the given URI. +type GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction struct { + // Uri: Required. The HTTP or HTTPS scheme URI. + Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Uri") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Uri") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageBasicCardButtonOpenUriAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageCard: The card response +// message. +type GoogleCloudDialogflowV2beta1IntentMessageCard struct { + // Buttons: Optional. The collection of card buttons. + Buttons []*GoogleCloudDialogflowV2beta1IntentMessageCardButton `json:"buttons,omitempty"` + + // ImageUri: Optional. The public URI to an image file for the card. + ImageUri string `json:"imageUri,omitempty"` + + // Subtitle: Optional. The subtitle of the card. + Subtitle string `json:"subtitle,omitempty"` + + // Title: Optional. The title of the card. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Buttons") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Buttons") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageCard) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageCard + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageCardButton: Optional. +// Contains information about a button. +type GoogleCloudDialogflowV2beta1IntentMessageCardButton struct { + // Postback: Optional. The text to send back to the Dialogflow API or a + // URI to + // open. + Postback string `json:"postback,omitempty"` + + // Text: Optional. The text to show on the button. + Text string `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Postback") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Postback") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageCardButton) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageCardButton + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect: The card for +// presenting a carousel of options to select from. +type GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect struct { + // Items: Required. Carousel items. + Items []*GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem `json:"items,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageCarouselSelect + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem: An item +// in the carousel. 
+type GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem struct { + // Description: Optional. The body text of the card. + Description string `json:"description,omitempty"` + + // Image: Optional. The image to display. + Image *GoogleCloudDialogflowV2beta1IntentMessageImage `json:"image,omitempty"` + + // Info: Required. Additional info about the option item. + Info *GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo `json:"info,omitempty"` + + // Title: Required. Title of the carousel item. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageCarouselSelectItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageImage: The image response +// message. +type GoogleCloudDialogflowV2beta1IntentMessageImage struct { + // AccessibilityText: Optional. 
A text description of the image to be + // used for accessibility, + // e.g., screen readers. + AccessibilityText string `json:"accessibilityText,omitempty"` + + // ImageUri: Optional. The public URI to an image file. + ImageUri string `json:"imageUri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessibilityText") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessibilityText") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageImage) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageImage + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion: The +// suggestion chip message that allows the user to jump out to the +// app +// or website associated with this agent. +type GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion struct { + // DestinationName: Required. The name of the app or site this chip is + // linking to. + DestinationName string `json:"destinationName,omitempty"` + + // Uri: Required. The URI of the app or site to open when the user taps + // the + // suggestion chip. 
+ Uri string `json:"uri,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageLinkOutSuggestion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageListSelect: The card for +// presenting a list of options to select from. +type GoogleCloudDialogflowV2beta1IntentMessageListSelect struct { + // Items: Required. List items. + Items []*GoogleCloudDialogflowV2beta1IntentMessageListSelectItem `json:"items,omitempty"` + + // Title: Optional. The overall title of the list. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageListSelect) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageListSelect + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageListSelectItem: An item in +// the list. +type GoogleCloudDialogflowV2beta1IntentMessageListSelectItem struct { + // Description: Optional. The main text describing the item. + Description string `json:"description,omitempty"` + + // Image: Optional. The image to display. + Image *GoogleCloudDialogflowV2beta1IntentMessageImage `json:"image,omitempty"` + + // Info: Required. Additional information about this option. + Info *GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo `json:"info,omitempty"` + + // Title: Required. The title of the list item. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageListSelectItem) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageListSelectItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageQuickReplies: The quick +// replies response message. +type GoogleCloudDialogflowV2beta1IntentMessageQuickReplies struct { + // QuickReplies: Optional. The collection of quick replies. + QuickReplies []string `json:"quickReplies,omitempty"` + + // Title: Optional. The title of the collection of quick replies. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuickReplies") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuickReplies") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageQuickReplies) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageQuickReplies + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo: Additional +// info about the select item for when it is triggered in a +// dialog. +type GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo struct { + // Key: Required. A unique key that will be sent back to the agent if + // this + // response is given. + Key string `json:"key,omitempty"` + + // Synonyms: Optional. A list of synonyms that can also be used to + // trigger this + // item in dialog. + Synonyms []string `json:"synonyms,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageSelectItemInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse: The simple +// response message containing speech or text. +type GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse struct { + // DisplayText: Optional. The text to display. + DisplayText string `json:"displayText,omitempty"` + + // Ssml: One of text_to_speech or ssml must be provided. Structured + // spoken + // response to the user in the SSML format. Mutually exclusive + // with + // text_to_speech. + Ssml string `json:"ssml,omitempty"` + + // TextToSpeech: One of text_to_speech or ssml must be provided. The + // plain text of the + // speech output. Mutually exclusive with ssml. + TextToSpeech string `json:"textToSpeech,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayText") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayText") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses: The +// collection of simple response candidates. +// This message in `QueryResult.fulfillment_messages` +// and +// `WebhookResponse.fulfillment_messages` should contain only +// one +// `SimpleResponse`. +type GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses struct { + // SimpleResponses: Required. The list of simple responses. + SimpleResponses []*GoogleCloudDialogflowV2beta1IntentMessageSimpleResponse `json:"simpleResponses,omitempty"` + + // ForceSendFields is a list of field names (e.g. "SimpleResponses") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SimpleResponses") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageSimpleResponses + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageSuggestion: The suggestion +// chip message that the user can tap to quickly post a reply +// to the conversation. +type GoogleCloudDialogflowV2beta1IntentMessageSuggestion struct { + // Title: Required. The text shown the in the suggestion chip. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Title") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Title") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageSuggestion) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageSuggestion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageSuggestions: The collection +// of suggestions. +type GoogleCloudDialogflowV2beta1IntentMessageSuggestions struct { + // Suggestions: Required. 
The list of suggested replies. + Suggestions []*GoogleCloudDialogflowV2beta1IntentMessageSuggestion `json:"suggestions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Suggestions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Suggestions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageSuggestions) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageSuggestions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentMessageText: The text response +// message. +type GoogleCloudDialogflowV2beta1IntentMessageText struct { + // Text: Optional. The collection of the agent's responses. + Text []string `json:"text,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Text") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Text") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentMessageText) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentMessageText + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentParameter: Represents intent +// parameters. +type GoogleCloudDialogflowV2beta1IntentParameter struct { + // DefaultValue: Optional. The default value to use when the `value` + // yields an empty + // result. + // Default values can be extracted from contexts by using the + // following + // syntax: `#context_name.parameter_name`. + DefaultValue string `json:"defaultValue,omitempty"` + + // DisplayName: Required. The name of the parameter. + DisplayName string `json:"displayName,omitempty"` + + // EntityTypeDisplayName: Optional. The name of the entity type, + // prefixed with `@`, that + // describes values of the parameter. If the parameter is + // required, this must be provided. + EntityTypeDisplayName string `json:"entityTypeDisplayName,omitempty"` + + // IsList: Optional. Indicates whether the parameter represents a list + // of values. + IsList bool `json:"isList,omitempty"` + + // Mandatory: Optional. Indicates whether the parameter is required. + // That is, + // whether the intent cannot be completed without collecting the + // parameter + // value. + Mandatory bool `json:"mandatory,omitempty"` + + // Name: The unique identifier of this parameter. 
+ Name string `json:"name,omitempty"` + + // Prompts: Optional. The collection of prompts that the agent can + // present to the + // user in order to collect value for the parameter. + Prompts []string `json:"prompts,omitempty"` + + // Value: Optional. The definition of the parameter value. It can be: + // - a constant string, + // - a parameter value defined as `$parameter_name`, + // - an original parameter value defined as + // `$parameter_name.original`, + // - a parameter value from some context defined as + // `#context_name.parameter_name`. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultValue") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentParameter) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentParameter + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentTrainingPhrase: Represents an +// example or template that the agent is trained on. +type GoogleCloudDialogflowV2beta1IntentTrainingPhrase struct { + // Name: Required. 
The unique identifier of this training phrase. + Name string `json:"name,omitempty"` + + // Parts: Required. The collection of training phrase parts (can be + // annotated). + // Fields: `entity_type`, `alias` and `user_defined` should be + // populated + // only for the annotated parts of the training phrase. + Parts []*GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart `json:"parts,omitempty"` + + // TimesAddedCount: Optional. Indicates how many times this example or + // template was added to + // the intent. Each time a developer adds an existing sample by editing + // an + // intent or training, this counter is increased. + TimesAddedCount int64 `json:"timesAddedCount,omitempty"` + + // Type: Required. The type of the training phrase. + // + // Possible values: + // "TYPE_UNSPECIFIED" - Not specified. This value should never be + // used. + // "EXAMPLE" - Examples do not contain @-prefixed entity type names, + // but example parts + // can be annotated with entity types. + // "TEMPLATE" - Templates are not annotated with entity types, but + // they can contain + // @-prefixed entity type names as substrings. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentTrainingPhrase) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentTrainingPhrase + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart: Represents a +// part of a training phrase. +type GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart struct { + // Alias: Optional. The parameter name for the value extracted from + // the + // annotated part of the example. + Alias string `json:"alias,omitempty"` + + // EntityType: Optional. The entity type name prefixed with `@`. This + // field is + // required for the annotated part of the text and applies only + // to + // examples. + EntityType string `json:"entityType,omitempty"` + + // Text: Required. The text corresponding to the example or template, + // if there are no annotations. For + // annotated examples, it is the text for one of the example's parts. + Text string `json:"text,omitempty"` + + // UserDefined: Optional. Indicates whether the text was manually + // annotated by the + // developer. + UserDefined bool `json:"userDefined,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Alias") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Alias") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1IntentTrainingPhrasePart + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest: Represents +// the contents of the original request that was passed to +// the `[Streaming]DetectIntent` call. +type GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest struct { + // Payload: Optional. This field is set to the value of + // `QueryParameters.payload` field + // passed in the request. + Payload googleapi.RawMessage `json:"payload,omitempty"` + + // Source: The source of this request, e.g., `google`, `facebook`, + // `slack`. It is set + // by Dialogflow-owned servers. Possible values of this field correspond + // to + // Intent.Message.Platform. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Payload") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Payload") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1QueryResult: Represents the result of +// conversational query or event processing. +type GoogleCloudDialogflowV2beta1QueryResult struct { + // Action: The action name from the matched intent. + Action string `json:"action,omitempty"` + + // AllRequiredParamsPresent: This field is set to: + // - `false` if the matched intent has required parameters and not all + // of + // the required parameter values have been collected. + // - `true` if all required parameter values have been collected, or if + // the + // matched intent doesn't contain any required parameters. + AllRequiredParamsPresent bool `json:"allRequiredParamsPresent,omitempty"` + + // DiagnosticInfo: The free-form diagnostic info. For example, this + // field + // could contain webhook call latency. + DiagnosticInfo googleapi.RawMessage `json:"diagnosticInfo,omitempty"` + + // FulfillmentMessages: The collection of rich messages to present to + // the user. + FulfillmentMessages []*GoogleCloudDialogflowV2beta1IntentMessage `json:"fulfillmentMessages,omitempty"` + + // FulfillmentText: The text to be pronounced to the user or shown on + // the screen. + FulfillmentText string `json:"fulfillmentText,omitempty"` + + // Intent: The intent that matched the conversational query. Some, + // not + // all fields are filled in this message, including but not limited + // to: + // `name`, `display_name` and `webhook_state`. + Intent *GoogleCloudDialogflowV2beta1Intent `json:"intent,omitempty"` + + // IntentDetectionConfidence: The intent detection confidence. 
Values + // range from 0.0 + // (completely uncertain) to 1.0 (completely certain). + IntentDetectionConfidence float64 `json:"intentDetectionConfidence,omitempty"` + + // LanguageCode: The language that was triggered during intent + // detection. + // See [Language + // Support](https://dialogflow.com/docs/reference/language) + // for a list of the currently supported language codes. + LanguageCode string `json:"languageCode,omitempty"` + + // OutputContexts: The collection of output contexts. If + // applicable, + // `output_contexts.parameters` contains entries with name + // `.original` containing the original parameter + // values + // before the query. + OutputContexts []*GoogleCloudDialogflowV2beta1Context `json:"outputContexts,omitempty"` + + // Parameters: The collection of extracted parameters. + Parameters googleapi.RawMessage `json:"parameters,omitempty"` + + // QueryText: The original conversational query text: + // - If natural language text was provided as input, `query_text` + // contains + // a copy of the input. + // - If natural language speech audio was provided as input, + // `query_text` + // contains the speech recognition result. If speech recognizer + // produced + // multiple alternatives, a particular one is picked. + // - If an event was provided as input, `query_text` is not set. + QueryText string `json:"queryText,omitempty"` + + // SpeechRecognitionConfidence: The Speech recognition confidence + // between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words + // are + // correct. The default of 0.0 is a sentinel value indicating that + // confidence + // was not set. + // + // You should not rely on this field as it isn't guaranteed to be + // accurate, or + // even set. 
In particular this field isn't set in Webhook calls and + // for + // StreamingDetectIntent since the streaming endpoint has separate + // confidence + // estimates per portion of the audio in StreamingRecognitionResult. + SpeechRecognitionConfidence float64 `json:"speechRecognitionConfidence,omitempty"` + + // WebhookPayload: If the query was fulfilled by a webhook call, this + // field is set to the + // value of the `payload` field returned in the webhook response. + WebhookPayload googleapi.RawMessage `json:"webhookPayload,omitempty"` + + // WebhookSource: If the query was fulfilled by a webhook call, this + // field is set to the + // value of the `source` field returned in the webhook response. + WebhookSource string `json:"webhookSource,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1QueryResult) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1QueryResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GoogleCloudDialogflowV2beta1QueryResult) UnmarshalJSON(data []byte) error { + type NoMethod GoogleCloudDialogflowV2beta1QueryResult + var s1 struct { + IntentDetectionConfidence gensupport.JSONFloat64 `json:"intentDetectionConfidence"` + SpeechRecognitionConfidence gensupport.JSONFloat64 `json:"speechRecognitionConfidence"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.IntentDetectionConfidence = float64(s1.IntentDetectionConfidence) + s.SpeechRecognitionConfidence = float64(s1.SpeechRecognitionConfidence) + return nil +} + +// GoogleCloudDialogflowV2beta1WebhookRequest: The request message for a +// webhook call. +type GoogleCloudDialogflowV2beta1WebhookRequest struct { + // OriginalDetectIntentRequest: Optional. The contents of the original + // request that was passed to + // `[Streaming]DetectIntent` call. + OriginalDetectIntentRequest *GoogleCloudDialogflowV2beta1OriginalDetectIntentRequest `json:"originalDetectIntentRequest,omitempty"` + + // QueryResult: The result of the conversational query or event + // processing. Contains the + // same value as `[Streaming]DetectIntentResponse.query_result`. + QueryResult *GoogleCloudDialogflowV2beta1QueryResult `json:"queryResult,omitempty"` + + // ResponseId: The unique identifier of the response. Contains the same + // value as + // `[Streaming]DetectIntentResponse.response_id`. + ResponseId string `json:"responseId,omitempty"` + + // Session: The unique identifier of detectIntent request session. + // Can be used to identify end-user inside webhook + // implementation. + // Format: `projects//agent/sessions/`. 
+ Session string `json:"session,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "OriginalDetectIntentRequest") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "OriginalDetectIntentRequest") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1WebhookRequest) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1WebhookRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleCloudDialogflowV2beta1WebhookResponse: The response message for +// a webhook call. +type GoogleCloudDialogflowV2beta1WebhookResponse struct { + // FollowupEventInput: Optional. Makes the platform immediately invoke + // another `DetectIntent` call + // internally with the specified event as input. + FollowupEventInput *GoogleCloudDialogflowV2beta1EventInput `json:"followupEventInput,omitempty"` + + // FulfillmentMessages: Optional. The collection of rich messages to + // present to the user. This + // value is passed directly to `QueryResult.fulfillment_messages`. + FulfillmentMessages []*GoogleCloudDialogflowV2beta1IntentMessage `json:"fulfillmentMessages,omitempty"` + + // FulfillmentText: Optional. 
The text to be shown on the screen. This + // value is passed directly + // to `QueryResult.fulfillment_text`. + FulfillmentText string `json:"fulfillmentText,omitempty"` + + // OutputContexts: Optional. The collection of output contexts. This + // value is passed directly + // to `QueryResult.output_contexts`. + OutputContexts []*GoogleCloudDialogflowV2beta1Context `json:"outputContexts,omitempty"` + + // Payload: Optional. This value is passed directly to + // `QueryResult.webhook_payload`. + Payload googleapi.RawMessage `json:"payload,omitempty"` + + // Source: Optional. This value is passed directly to + // `QueryResult.webhook_source`. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FollowupEventInput") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FollowupEventInput") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleCloudDialogflowV2beta1WebhookResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleCloudDialogflowV2beta1WebhookResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleLongrunningOperation: This resource represents a long-running +// operation that is the result of a +// network API call. +type GoogleLongrunningOperation struct { + // Done: If the value is `false`, it means the operation is still in + // progress. + // If `true`, the operation is completed, and either `error` or + // `response` is + // available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *GoogleRpcStatus `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. + // It typically + // contains progress information and common metadata such as create + // time. + // Some services might not provide such metadata. Any method that + // returns a + // long-running operation should document the metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that + // originally returns it. If you use the default HTTP mapping, + // the + // `name` should have the format of `operations/some/unique/name`. + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. + // If the original + // method returns no data on success, such as `Delete`, the response + // is + // `google.protobuf.Empty`. If the original method is + // standard + // `Get`/`Create`/`Update`, the response should be the resource. For + // other + // methods, the response should have the type `XxxResponse`, where + // `Xxx` + // is the original method name. 
For example, if the original method + // name + // is `TakeSnapshot()`, the inferred response type + // is + // `TakeSnapshotResponse`. + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleProtobufEmpty: A generic empty message that you can re-use to +// avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type GoogleProtobufEmpty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` +} + +// GoogleRpcStatus: The `Status` type defines a logical error model that +// is suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. The optional error details may contain +// arbitrary +// information about the error. There is a predefined set of error +// detail types +// in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. +// +// Example uses of this error model include: +// +// - Partial errors. 
If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. +type GoogleRpcStatus struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of + // message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { + type NoMethod GoogleRpcStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleTypeLatLng: An object representing a latitude/longitude pair. +// This is expressed as a pair +// of doubles representing degrees latitude and degrees longitude. +// Unless +// specified otherwise, this must conform to the +// WGS84 +// st +// andard. Values must be within normalized ranges. +type GoogleTypeLatLng struct { + // Latitude: The latitude in degrees. It must be in the range [-90.0, + // +90.0]. + Latitude float64 `json:"latitude,omitempty"` + + // Longitude: The longitude in degrees. It must be in the range [-180.0, + // +180.0]. + Longitude float64 `json:"longitude,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Latitude") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Latitude") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleTypeLatLng) MarshalJSON() ([]byte, error) { + type NoMethod GoogleTypeLatLng + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GoogleTypeLatLng) UnmarshalJSON(data []byte) error { + type NoMethod GoogleTypeLatLng + var s1 struct { + Latitude gensupport.JSONFloat64 `json:"latitude"` + Longitude gensupport.JSONFloat64 `json:"longitude"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Latitude = float64(s1.Latitude) + s.Longitude = float64(s1.Longitude) + return nil +} + +// method id "dialogflow.projects.getAgent": + +type ProjectsGetAgentCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetAgent: Retrieves the specified agent. +func (r *ProjectsService) GetAgent(parent string) *ProjectsGetAgentCall { + c := &ProjectsGetAgentCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsGetAgentCall) Fields(s ...googleapi.Field) *ProjectsGetAgentCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsGetAgentCall) IfNoneMatch(entityTag string) *ProjectsGetAgentCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsGetAgentCall) Context(ctx context.Context) *ProjectsGetAgentCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsGetAgentCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsGetAgentCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/agent") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.getAgent" call. +// Exactly one of *GoogleCloudDialogflowV2Agent or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Agent.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsGetAgentCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Agent, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Agent{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified agent.", + // "flatPath": "v2/projects/{projectsId}/agent", + // "httpMethod": "GET", + // "id": "dialogflow.projects.getAgent", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The project that the agent to fetch is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/agent", + // "response": { + // "$ref": "GoogleCloudDialogflowV2Agent" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.export": + +type ProjectsAgentExportCall struct { + s *Service + parent string + googleclouddialogflowv2exportagentrequest *GoogleCloudDialogflowV2ExportAgentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Export: Exports the specified agent to a ZIP file. 
+// +// +// Operation +func (r *ProjectsAgentService) Export(parent string, googleclouddialogflowv2exportagentrequest *GoogleCloudDialogflowV2ExportAgentRequest) *ProjectsAgentExportCall { + c := &ProjectsAgentExportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2exportagentrequest = googleclouddialogflowv2exportagentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentExportCall) Fields(s ...googleapi.Field) *ProjectsAgentExportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentExportCall) Context(ctx context.Context) *ProjectsAgentExportCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentExportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentExportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2exportagentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/agent:export") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.export" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentExportCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Exports the specified agent to a ZIP file.\n\n\nOperation \u003cresponse: ExportAgentResponse,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent:export", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.export", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The project that the agent to export is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/agent:export", + // "request": { + // "$ref": "GoogleCloudDialogflowV2ExportAgentRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.import": + +type ProjectsAgentImportCall struct { + s *Service + parent string + googleclouddialogflowv2importagentrequest *GoogleCloudDialogflowV2ImportAgentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Import: Imports the specified agent from a ZIP file. +// +// Uploads new intents and entity types without deleting the existing +// ones. +// Intents and entity types with the same name are replaced with the +// new +// versions from ImportAgentRequest. +// +// +// Operation +func (r *ProjectsAgentService) Import(parent string, googleclouddialogflowv2importagentrequest *GoogleCloudDialogflowV2ImportAgentRequest) *ProjectsAgentImportCall { + c := &ProjectsAgentImportCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2importagentrequest = googleclouddialogflowv2importagentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentImportCall) Fields(s ...googleapi.Field) *ProjectsAgentImportCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentImportCall) Context(ctx context.Context) *ProjectsAgentImportCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentImportCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentImportCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2importagentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/agent:import") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.import" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentImportCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Imports the specified agent from a ZIP file.\n\nUploads new intents and entity types without deleting the existing ones.\nIntents and entity types with the same name are replaced with the new\nversions from ImportAgentRequest.\n\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent:import", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.import", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The project that the agent to import is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/agent:import", + // "request": { + // "$ref": "GoogleCloudDialogflowV2ImportAgentRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.restore": + +type ProjectsAgentRestoreCall struct { + s *Service + parent string + googleclouddialogflowv2restoreagentrequest *GoogleCloudDialogflowV2RestoreAgentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores the specified agent from a ZIP file. +// +// Replaces the current agent version with a new one. All the intents +// and +// entity types in the older version are deleted. +// +// +// Operation +func (r *ProjectsAgentService) Restore(parent string, googleclouddialogflowv2restoreagentrequest *GoogleCloudDialogflowV2RestoreAgentRequest) *ProjectsAgentRestoreCall { + c := &ProjectsAgentRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2restoreagentrequest = googleclouddialogflowv2restoreagentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentRestoreCall) Fields(s ...googleapi.Field) *ProjectsAgentRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentRestoreCall) Context(ctx context.Context) *ProjectsAgentRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2restoreagentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/agent:restore") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.restore" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restores the specified agent from a ZIP file.\n\nReplaces the current agent version with a new one. All the intents and\nentity types in the older version are deleted.\n\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent:restore", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.restore", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The project that the agent to restore is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/agent:restore", + // "request": { + // "$ref": "GoogleCloudDialogflowV2RestoreAgentRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.search": + +type ProjectsAgentSearchCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Search: Returns the list of agents. 
+// +// Since there is at most one conversational agent per project, this +// method is +// useful primarily for listing all agents across projects the caller +// has +// access to. One can achieve that with a wildcard project collection id +// "-". +// Refer to +// [List +// Sub-Collections](https://cloud.google.com/apis/design/design_pat +// terns#list_sub-collections). +func (r *ProjectsAgentService) Search(parent string) *ProjectsAgentSearchCall { + c := &ProjectsAgentSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return in a single page. By +// default 100 and at most 1000. +func (c *ProjectsAgentSearchCall) PageSize(pageSize int64) *ProjectsAgentSearchCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous list request. +func (c *ProjectsAgentSearchCall) PageToken(pageToken string) *ProjectsAgentSearchCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSearchCall) Fields(s ...googleapi.Field) *ProjectsAgentSearchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ProjectsAgentSearchCall) IfNoneMatch(entityTag string) *ProjectsAgentSearchCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSearchCall) Context(ctx context.Context) *ProjectsAgentSearchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSearchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSearchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/agent:search") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.search" call. +// Exactly one of *GoogleCloudDialogflowV2SearchAgentsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2SearchAgentsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *ProjectsAgentSearchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SearchAgentsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2SearchAgentsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of agents.\n\nSince there is at most one conversational agent per project, this method is\nuseful primarily for listing all agents across projects the caller has\naccess to. One can achieve that with a wildcard project collection id \"-\".\nRefer to [List\nSub-Collections](https://cloud.google.com/apis/design/design_patterns#list_sub-collections).", + // "flatPath": "v2/projects/{projectsId}/agent:search", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.search", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. The next_page_token value returned from a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. 
The project to list agents from.\nFormat: `projects/\u003cProject ID or '-'\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/agent:search", + // "response": { + // "$ref": "GoogleCloudDialogflowV2SearchAgentsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsAgentSearchCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2SearchAgentsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dialogflow.projects.agent.train": + +type ProjectsAgentTrainCall struct { + s *Service + parent string + googleclouddialogflowv2trainagentrequest *GoogleCloudDialogflowV2TrainAgentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Train: Trains the specified agent. +// +// +// Operation +func (r *ProjectsAgentService) Train(parent string, googleclouddialogflowv2trainagentrequest *GoogleCloudDialogflowV2TrainAgentRequest) *ProjectsAgentTrainCall { + c := &ProjectsAgentTrainCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2trainagentrequest = googleclouddialogflowv2trainagentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentTrainCall) Fields(s ...googleapi.Field) *ProjectsAgentTrainCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentTrainCall) Context(ctx context.Context) *ProjectsAgentTrainCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentTrainCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentTrainCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2trainagentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/agent:train") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.train" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsAgentTrainCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Trains the specified agent.\n\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent:train", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.train", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The project that the agent to train is associated with.\nFormat: `projects/\u003cProject ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/agent:train", + // "request": { + // "$ref": "GoogleCloudDialogflowV2TrainAgentRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.batchDelete": + +type ProjectsAgentEntityTypesBatchDeleteCall struct { + s *Service + parent string + googleclouddialogflowv2batchdeleteentitytypesrequest *GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchDelete: Deletes entity types in the specified agent. +// +// Operation +func (r *ProjectsAgentEntityTypesService) BatchDelete(parent string, googleclouddialogflowv2batchdeleteentitytypesrequest *GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest) *ProjectsAgentEntityTypesBatchDeleteCall { + c := &ProjectsAgentEntityTypesBatchDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchdeleteentitytypesrequest = googleclouddialogflowv2batchdeleteentitytypesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesBatchDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesBatchDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentEntityTypesBatchDeleteCall) Context(ctx context.Context) *ProjectsAgentEntityTypesBatchDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentEntityTypesBatchDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesBatchDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchdeleteentitytypesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes:batchDelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.batchDelete" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentEntityTypesBatchDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes entity types in the specified agent.\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes:batchDelete", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.entityTypes.batchDelete", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the agent to delete all entities types for. 
Format:\n`projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes:batchDelete", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchDeleteEntityTypesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.batchUpdate": + +type ProjectsAgentEntityTypesBatchUpdateCall struct { + s *Service + parent string + googleclouddialogflowv2batchupdateentitytypesrequest *GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchUpdate: Updates/Creates multiple entity types in the specified +// agent. +// +// Operation +func (r *ProjectsAgentEntityTypesService) BatchUpdate(parent string, googleclouddialogflowv2batchupdateentitytypesrequest *GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest) *ProjectsAgentEntityTypesBatchUpdateCall { + c := &ProjectsAgentEntityTypesBatchUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchupdateentitytypesrequest = googleclouddialogflowv2batchupdateentitytypesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesBatchUpdateCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesBatchUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentEntityTypesBatchUpdateCall) Context(ctx context.Context) *ProjectsAgentEntityTypesBatchUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentEntityTypesBatchUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesBatchUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchupdateentitytypesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes:batchUpdate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.batchUpdate" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentEntityTypesBatchUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates/Creates multiple entity types in the specified agent.\n\nOperation \u003cresponse: BatchUpdateEntityTypesResponse,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes:batchUpdate", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.entityTypes.batchUpdate", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The name of the agent to update or create entity types in.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes:batchUpdate", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchUpdateEntityTypesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.create": + +type ProjectsAgentEntityTypesCreateCall struct { + s *Service + parent string + googleclouddialogflowv2entitytype *GoogleCloudDialogflowV2EntityType + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an entity type in the specified agent. +func (r *ProjectsAgentEntityTypesService) Create(parent string, googleclouddialogflowv2entitytype *GoogleCloudDialogflowV2EntityType) *ProjectsAgentEntityTypesCreateCall { + c := &ProjectsAgentEntityTypesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2entitytype = googleclouddialogflowv2entitytype + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// of entity synonyms defined in `entity_type`. If not +// specified, the agent's default language is used. +// [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. +func (c *ProjectsAgentEntityTypesCreateCall) LanguageCode(languageCode string) *ProjectsAgentEntityTypesCreateCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentEntityTypesCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentEntityTypesCreateCall) Context(ctx context.Context) *ProjectsAgentEntityTypesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentEntityTypesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2entitytype) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.create" call. +// Exactly one of *GoogleCloudDialogflowV2EntityType or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2EntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentEntityTypesCreateCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2EntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2EntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an entity type in the specified agent.", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.entityTypes.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "languageCode": { + // "description": "Optional. The language of entity synonyms defined in `entity_type`. If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. 
The agent to create a entity type for.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes", + // "request": { + // "$ref": "GoogleCloudDialogflowV2EntityType" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2EntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.delete": + +type ProjectsAgentEntityTypesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified entity type. +func (r *ProjectsAgentEntityTypesService) Delete(name string) *ProjectsAgentEntityTypesDeleteCall { + c := &ProjectsAgentEntityTypesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentEntityTypesDeleteCall) Context(ctx context.Context) *ProjectsAgentEntityTypesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentEntityTypesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentEntityTypesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.entityTypes.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the entity type to delete.\nFormat: `projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntityType ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.get": + +type ProjectsAgentEntityTypesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified entity type. 
+func (r *ProjectsAgentEntityTypesService) Get(name string) *ProjectsAgentEntityTypesGetCall { + c := &ProjectsAgentEntityTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// to retrieve entity synonyms for. If not specified, +// the agent's default language is used. +// [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. +func (c *ProjectsAgentEntityTypesGetCall) LanguageCode(languageCode string) *ProjectsAgentEntityTypesGetCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesGetCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentEntityTypesGetCall) IfNoneMatch(entityTag string) *ProjectsAgentEntityTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentEntityTypesGetCall) Context(ctx context.Context) *ProjectsAgentEntityTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentEntityTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.get" call. +// Exactly one of *GoogleCloudDialogflowV2EntityType or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2EntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentEntityTypesGetCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2EntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2EntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.entityTypes.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "languageCode": { + // "description": "Optional. The language to retrieve entity synonyms for. If not specified,\nthe agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required. 
The name of the entity type.\nFormat: `projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntityType ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleCloudDialogflowV2EntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.list": + +type ProjectsAgentEntityTypesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns the list of all entity types in the specified agent. +func (r *ProjectsAgentEntityTypesService) List(parent string) *ProjectsAgentEntityTypesListCall { + c := &ProjectsAgentEntityTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// to list entity synonyms for. If not specified, +// the agent's default language is used. +// [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. +func (c *ProjectsAgentEntityTypesListCall) LanguageCode(languageCode string) *ProjectsAgentEntityTypesListCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return in a single page. By +// default 100 and at most 1000. +func (c *ProjectsAgentEntityTypesListCall) PageSize(pageSize int64) *ProjectsAgentEntityTypesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous list request. 
+func (c *ProjectsAgentEntityTypesListCall) PageToken(pageToken string) *ProjectsAgentEntityTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesListCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentEntityTypesListCall) IfNoneMatch(entityTag string) *ProjectsAgentEntityTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentEntityTypesListCall) Context(ctx context.Context) *ProjectsAgentEntityTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentEntityTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.list" call. +// Exactly one of *GoogleCloudDialogflowV2ListEntityTypesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2ListEntityTypesResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentEntityTypesListCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2ListEntityTypesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2ListEntityTypesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of all entity types in the specified agent.", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.entityTypes.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "languageCode": { + // "description": "Optional. The language to list entity synonyms for. If not specified,\nthe agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. The next_page_token value returned from a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. 
The agent to list all entity types from.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes", + // "response": { + // "$ref": "GoogleCloudDialogflowV2ListEntityTypesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsAgentEntityTypesListCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2ListEntityTypesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dialogflow.projects.agent.entityTypes.patch": + +type ProjectsAgentEntityTypesPatchCall struct { + s *Service + nameid string + googleclouddialogflowv2entitytype *GoogleCloudDialogflowV2EntityType + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified entity type. +func (r *ProjectsAgentEntityTypesService) Patch(nameid string, googleclouddialogflowv2entitytype *GoogleCloudDialogflowV2EntityType) *ProjectsAgentEntityTypesPatchCall { + c := &ProjectsAgentEntityTypesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.googleclouddialogflowv2entitytype = googleclouddialogflowv2entitytype + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// of entity synonyms defined in `entity_type`. If not +// specified, the agent's default language is used. 
+// [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. +func (c *ProjectsAgentEntityTypesPatchCall) LanguageCode(languageCode string) *ProjectsAgentEntityTypesPatchCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// UpdateMask sets the optional parameter "updateMask": The mask to +// control which fields get updated. +func (c *ProjectsAgentEntityTypesPatchCall) UpdateMask(updateMask string) *ProjectsAgentEntityTypesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesPatchCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentEntityTypesPatchCall) Context(ctx context.Context) *ProjectsAgentEntityTypesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentEntityTypesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2entitytype) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.patch" call. +// Exactly one of *GoogleCloudDialogflowV2EntityType or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2EntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentEntityTypesPatchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2EntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2EntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}", + // "httpMethod": "PATCH", + // "id": "dialogflow.projects.agent.entityTypes.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "languageCode": { + // "description": "Optional. The language of entity synonyms defined in `entity_type`. If not\nspecified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of the entity type. Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. 
The mask to control which fields get updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GoogleCloudDialogflowV2EntityType" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2EntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.entities.batchCreate": + +type ProjectsAgentEntityTypesEntitiesBatchCreateCall struct { + s *Service + parent string + googleclouddialogflowv2batchcreateentitiesrequest *GoogleCloudDialogflowV2BatchCreateEntitiesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchCreate: Creates multiple new entities in the specified entity +// type (extends the +// existing collection of entries). +// +// Operation +func (r *ProjectsAgentEntityTypesEntitiesService) BatchCreate(parent string, googleclouddialogflowv2batchcreateentitiesrequest *GoogleCloudDialogflowV2BatchCreateEntitiesRequest) *ProjectsAgentEntityTypesEntitiesBatchCreateCall { + c := &ProjectsAgentEntityTypesEntitiesBatchCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchcreateentitiesrequest = googleclouddialogflowv2batchcreateentitiesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesEntitiesBatchCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesEntitiesBatchCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentEntityTypesEntitiesBatchCreateCall) Context(ctx context.Context) *ProjectsAgentEntityTypesEntitiesBatchCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentEntityTypesEntitiesBatchCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesEntitiesBatchCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchcreateentitiesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entities:batchCreate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.entities.batchCreate" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentEntityTypesEntitiesBatchCreateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates multiple new entities in the specified entity type (extends the\nexisting collection of entries).\n\nOperation \u003cresponse: google.protobuf.Empty\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}/entities:batchCreate", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.entityTypes.entities.batchCreate", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the entity type to create entities in. 
Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entities:batchCreate", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchCreateEntitiesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.entities.batchDelete": + +type ProjectsAgentEntityTypesEntitiesBatchDeleteCall struct { + s *Service + parent string + googleclouddialogflowv2batchdeleteentitiesrequest *GoogleCloudDialogflowV2BatchDeleteEntitiesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchDelete: Deletes entities in the specified entity +// type. +// +// Operation +func (r *ProjectsAgentEntityTypesEntitiesService) BatchDelete(parent string, googleclouddialogflowv2batchdeleteentitiesrequest *GoogleCloudDialogflowV2BatchDeleteEntitiesRequest) *ProjectsAgentEntityTypesEntitiesBatchDeleteCall { + c := &ProjectsAgentEntityTypesEntitiesBatchDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchdeleteentitiesrequest = googleclouddialogflowv2batchdeleteentitiesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesEntitiesBatchDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesEntitiesBatchDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentEntityTypesEntitiesBatchDeleteCall) Context(ctx context.Context) *ProjectsAgentEntityTypesEntitiesBatchDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentEntityTypesEntitiesBatchDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesEntitiesBatchDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchdeleteentitiesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entities:batchDelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.entities.batchDelete" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentEntityTypesEntitiesBatchDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes entities in the specified entity type.\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}/entities:batchDelete", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.entityTypes.entities.batchDelete", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the entity type to delete entries for. 
Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entities:batchDelete", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchDeleteEntitiesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.entityTypes.entities.batchUpdate": + +type ProjectsAgentEntityTypesEntitiesBatchUpdateCall struct { + s *Service + parent string + googleclouddialogflowv2batchupdateentitiesrequest *GoogleCloudDialogflowV2BatchUpdateEntitiesRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchUpdate: Updates entities in the specified entity type (replaces +// the existing +// collection of entries). +// +// Operation +func (r *ProjectsAgentEntityTypesEntitiesService) BatchUpdate(parent string, googleclouddialogflowv2batchupdateentitiesrequest *GoogleCloudDialogflowV2BatchUpdateEntitiesRequest) *ProjectsAgentEntityTypesEntitiesBatchUpdateCall { + c := &ProjectsAgentEntityTypesEntitiesBatchUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchupdateentitiesrequest = googleclouddialogflowv2batchupdateentitiesrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentEntityTypesEntitiesBatchUpdateCall) Fields(s ...googleapi.Field) *ProjectsAgentEntityTypesEntitiesBatchUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentEntityTypesEntitiesBatchUpdateCall) Context(ctx context.Context) *ProjectsAgentEntityTypesEntitiesBatchUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentEntityTypesEntitiesBatchUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentEntityTypesEntitiesBatchUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchupdateentitiesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entities:batchUpdate") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.entityTypes.entities.batchUpdate" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsAgentEntityTypesEntitiesBatchUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates entities in the specified entity type (replaces the existing\ncollection of entries).\n\nOperation \u003cresponse: google.protobuf.Empty,\n metadata: google.protobuf.Struct\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/entityTypes/{entityTypesId}/entities:batchUpdate", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.entityTypes.entities.batchUpdate", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the entity type to update the entities in. 
Format:\n`projects/\u003cProject ID\u003e/agent/entityTypes/\u003cEntity Type ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entities:batchUpdate", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchUpdateEntitiesRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.intents.batchDelete": + +type ProjectsAgentIntentsBatchDeleteCall struct { + s *Service + parent string + googleclouddialogflowv2batchdeleteintentsrequest *GoogleCloudDialogflowV2BatchDeleteIntentsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchDelete: Deletes intents in the specified agent. +// +// Operation +func (r *ProjectsAgentIntentsService) BatchDelete(parent string, googleclouddialogflowv2batchdeleteintentsrequest *GoogleCloudDialogflowV2BatchDeleteIntentsRequest) *ProjectsAgentIntentsBatchDeleteCall { + c := &ProjectsAgentIntentsBatchDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchdeleteintentsrequest = googleclouddialogflowv2batchdeleteintentsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsBatchDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsBatchDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentIntentsBatchDeleteCall) Context(ctx context.Context) *ProjectsAgentIntentsBatchDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentIntentsBatchDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsBatchDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchdeleteintentsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/intents:batchDelete") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.batchDelete" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentIntentsBatchDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes intents in the specified agent.\n\nOperation \u003cresponse: google.protobuf.Empty\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/intents:batchDelete", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.intents.batchDelete", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the agent to delete all entities types for. Format:\n`projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/intents:batchDelete", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchDeleteIntentsRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.intents.batchUpdate": + +type ProjectsAgentIntentsBatchUpdateCall struct { + s *Service + parent string + googleclouddialogflowv2batchupdateintentsrequest *GoogleCloudDialogflowV2BatchUpdateIntentsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BatchUpdate: Updates/Creates multiple intents in the specified +// agent. 
+// +// Operation +func (r *ProjectsAgentIntentsService) BatchUpdate(parent string, googleclouddialogflowv2batchupdateintentsrequest *GoogleCloudDialogflowV2BatchUpdateIntentsRequest) *ProjectsAgentIntentsBatchUpdateCall { + c := &ProjectsAgentIntentsBatchUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2batchupdateintentsrequest = googleclouddialogflowv2batchupdateintentsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsBatchUpdateCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsBatchUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentIntentsBatchUpdateCall) Context(ctx context.Context) *ProjectsAgentIntentsBatchUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentIntentsBatchUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsBatchUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2batchupdateintentsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/intents:batchUpdate") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.batchUpdate" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentIntentsBatchUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates/Creates multiple intents in the specified agent.\n\nOperation \u003cresponse: BatchUpdateIntentsResponse\u003e", + // "flatPath": "v2/projects/{projectsId}/agent/intents:batchUpdate", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.intents.batchUpdate", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The name of the agent to update or create intents in.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/intents:batchUpdate", + // "request": { + // "$ref": "GoogleCloudDialogflowV2BatchUpdateIntentsRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.intents.create": + +type ProjectsAgentIntentsCreateCall struct { + s *Service + parent string + googleclouddialogflowv2intent *GoogleCloudDialogflowV2Intent + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an intent in the specified agent. +func (r *ProjectsAgentIntentsService) Create(parent string, googleclouddialogflowv2intent *GoogleCloudDialogflowV2Intent) *ProjectsAgentIntentsCreateCall { + c := &ProjectsAgentIntentsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2intent = googleclouddialogflowv2intent + return c +} + +// IntentView sets the optional parameter "intentView": The resource +// view to apply to the returned intent. +// +// Possible values: +// "INTENT_VIEW_UNSPECIFIED" +// "INTENT_VIEW_FULL" +func (c *ProjectsAgentIntentsCreateCall) IntentView(intentView string) *ProjectsAgentIntentsCreateCall { + c.urlParams_.Set("intentView", intentView) + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// of training phrases, parameters and rich messages +// defined in `intent`. If not specified, the agent's default language +// is +// used. [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. 
+func (c *ProjectsAgentIntentsCreateCall) LanguageCode(languageCode string) *ProjectsAgentIntentsCreateCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentIntentsCreateCall) Context(ctx context.Context) *ProjectsAgentIntentsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentIntentsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2intent) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/intents") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.create" call. +// Exactly one of *GoogleCloudDialogflowV2Intent or error will be +// non-nil. 
Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Intent.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentIntentsCreateCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Intent, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Intent{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an intent in the specified agent.", + // "flatPath": "v2/projects/{projectsId}/agent/intents", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.intents.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "intentView": { + // "description": "Optional. The resource view to apply to the returned intent.", + // "enum": [ + // "INTENT_VIEW_UNSPECIFIED", + // "INTENT_VIEW_FULL" + // ], + // "location": "query", + // "type": "string" + // }, + // "languageCode": { + // "description": "Optional. The language of training phrases, parameters and rich messages\ndefined in `intent`. If not specified, the agent's default language is\nused. 
[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The agent to create a intent for.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/intents", + // "request": { + // "$ref": "GoogleCloudDialogflowV2Intent" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2Intent" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.intents.delete": + +type ProjectsAgentIntentsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified intent. +func (r *ProjectsAgentIntentsService) Delete(name string) *ProjectsAgentIntentsDeleteCall { + c := &ProjectsAgentIntentsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentIntentsDeleteCall) Context(ctx context.Context) *ProjectsAgentIntentsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentIntentsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentIntentsDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified intent.", + // "flatPath": "v2/projects/{projectsId}/agent/intents/{intentsId}", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.intents.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the intent to delete.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/intents/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.intents.get": + +type ProjectsAgentIntentsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified intent. +func (r *ProjectsAgentIntentsService) Get(name string) *ProjectsAgentIntentsGetCall { + c := &ProjectsAgentIntentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// IntentView sets the optional parameter "intentView": The resource +// view to apply to the returned intent. 
+// +// Possible values: +// "INTENT_VIEW_UNSPECIFIED" +// "INTENT_VIEW_FULL" +func (c *ProjectsAgentIntentsGetCall) IntentView(intentView string) *ProjectsAgentIntentsGetCall { + c.urlParams_.Set("intentView", intentView) + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// to retrieve training phrases, parameters and rich +// messages for. If not specified, the agent's default language is +// used. +// [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. +func (c *ProjectsAgentIntentsGetCall) LanguageCode(languageCode string) *ProjectsAgentIntentsGetCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsGetCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsAgentIntentsGetCall) IfNoneMatch(entityTag string) *ProjectsAgentIntentsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentIntentsGetCall) Context(ctx context.Context) *ProjectsAgentIntentsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentIntentsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.get" call. +// Exactly one of *GoogleCloudDialogflowV2Intent or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Intent.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentIntentsGetCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Intent, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Intent{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified intent.", + // "flatPath": "v2/projects/{projectsId}/agent/intents/{intentsId}", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.intents.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "intentView": { + // "description": "Optional. The resource view to apply to the returned intent.", + // "enum": [ + // "INTENT_VIEW_UNSPECIFIED", + // "INTENT_VIEW_FULL" + // ], + // "location": "query", + // "type": "string" + // }, + // "languageCode": { + // "description": "Optional. The language to retrieve training phrases, parameters and rich\nmessages for. If not specified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required. 
The name of the intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/intents/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleCloudDialogflowV2Intent" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.intents.list": + +type ProjectsAgentIntentsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns the list of all intents in the specified agent. +func (r *ProjectsAgentIntentsService) List(parent string) *ProjectsAgentIntentsListCall { + c := &ProjectsAgentIntentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// IntentView sets the optional parameter "intentView": The resource +// view to apply to the returned intent. +// +// Possible values: +// "INTENT_VIEW_UNSPECIFIED" +// "INTENT_VIEW_FULL" +func (c *ProjectsAgentIntentsListCall) IntentView(intentView string) *ProjectsAgentIntentsListCall { + c.urlParams_.Set("intentView", intentView) + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// to list training phrases, parameters and rich +// messages for. If not specified, the agent's default language is +// used. +// [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent before they can be used. +func (c *ProjectsAgentIntentsListCall) LanguageCode(languageCode string) *ProjectsAgentIntentsListCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return in a single page. 
+ By +// default 100 and at most 1000. +func (c *ProjectsAgentIntentsListCall) PageSize(pageSize int64) *ProjectsAgentIntentsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous list request. +func (c *ProjectsAgentIntentsListCall) PageToken(pageToken string) *ProjectsAgentIntentsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsListCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsAgentIntentsListCall) IfNoneMatch(entityTag string) *ProjectsAgentIntentsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentIntentsListCall) Context(ctx context.Context) *ProjectsAgentIntentsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentIntentsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/intents") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.list" call. +// Exactly one of *GoogleCloudDialogflowV2ListIntentsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2ListIntentsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentIntentsListCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2ListIntentsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2ListIntentsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of all intents in the specified agent.", + // "flatPath": "v2/projects/{projectsId}/agent/intents", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.intents.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "intentView": { + // "description": "Optional. The resource view to apply to the returned intent.", + // "enum": [ + // "INTENT_VIEW_UNSPECIFIED", + // "INTENT_VIEW_FULL" + // ], + // "location": "query", + // "type": "string" + // }, + // "languageCode": { + // "description": "Optional. The language to list training phrases, parameters and rich\nmessages for. If not specified, the agent's default language is used.\n[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. 
The next_page_token value returned from a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The agent to list all intents from.\nFormat: `projects/\u003cProject ID\u003e/agent`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/intents", + // "response": { + // "$ref": "GoogleCloudDialogflowV2ListIntentsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsAgentIntentsListCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2ListIntentsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dialogflow.projects.agent.intents.patch": + +type ProjectsAgentIntentsPatchCall struct { + s *Service + nameid string + googleclouddialogflowv2intent *GoogleCloudDialogflowV2Intent + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified intent. +func (r *ProjectsAgentIntentsService) Patch(nameid string, googleclouddialogflowv2intent *GoogleCloudDialogflowV2Intent) *ProjectsAgentIntentsPatchCall { + c := &ProjectsAgentIntentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.googleclouddialogflowv2intent = googleclouddialogflowv2intent + return c +} + +// IntentView sets the optional parameter "intentView": The resource +// view to apply to the returned intent. 
+// +// Possible values: +// "INTENT_VIEW_UNSPECIFIED" +// "INTENT_VIEW_FULL" +func (c *ProjectsAgentIntentsPatchCall) IntentView(intentView string) *ProjectsAgentIntentsPatchCall { + c.urlParams_.Set("intentView", intentView) + return c +} + +// LanguageCode sets the optional parameter "languageCode": The language +// of training phrases, parameters and rich messages +// defined in `intent`. If not specified, the agent's default language +// is +// used. [More than a +// dozen +// languages](https://dialogflow.com/docs/reference/language) are +// supported. +// Note: languages must be enabled in the agent, before they can be +// used. +func (c *ProjectsAgentIntentsPatchCall) LanguageCode(languageCode string) *ProjectsAgentIntentsPatchCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// UpdateMask sets the optional parameter "updateMask": The mask to +// control which fields get updated. +func (c *ProjectsAgentIntentsPatchCall) UpdateMask(updateMask string) *ProjectsAgentIntentsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentIntentsPatchCall) Fields(s ...googleapi.Field) *ProjectsAgentIntentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentIntentsPatchCall) Context(ctx context.Context) *ProjectsAgentIntentsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentIntentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentIntentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2intent) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.intents.patch" call. +// Exactly one of *GoogleCloudDialogflowV2Intent or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Intent.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentIntentsPatchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Intent, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Intent{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified intent.", + // "flatPath": "v2/projects/{projectsId}/agent/intents/{intentsId}", + // "httpMethod": "PATCH", + // "id": "dialogflow.projects.agent.intents.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "intentView": { + // "description": "Optional. The resource view to apply to the returned intent.", + // "enum": [ + // "INTENT_VIEW_UNSPECIFIED", + // "INTENT_VIEW_FULL" + // ], + // "location": "query", + // "type": "string" + // }, + // "languageCode": { + // "description": "Optional. The language of training phrases, parameters and rich messages\ndefined in `intent`. If not specified, the agent's default language is\nused. 
[More than a dozen\nlanguages](https://dialogflow.com/docs/reference/language) are supported.\nNote: languages must be enabled in the agent, before they can be used.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Required for all methods except `create` (`create` populates the name\nautomatically.\nThe unique identifier of this intent.\nFormat: `projects/\u003cProject ID\u003e/agent/intents/\u003cIntent ID\u003e`.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/intents/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. The mask to control which fields get updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GoogleCloudDialogflowV2Intent" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2Intent" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.runtimes.sessions.deleteContexts": + +type ProjectsAgentRuntimesSessionsDeleteContextsCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteContexts: Deletes all active contexts in the specified session. +func (r *ProjectsAgentRuntimesSessionsService) DeleteContexts(parent string) *ProjectsAgentRuntimesSessionsDeleteContextsCall { + c := &ProjectsAgentRuntimesSessionsDeleteContextsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentRuntimesSessionsDeleteContextsCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsDeleteContextsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentRuntimesSessionsDeleteContextsCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsDeleteContextsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRuntimesSessionsDeleteContextsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRuntimesSessionsDeleteContextsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/contexts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.runtimes.sessions.deleteContexts" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsAgentRuntimesSessionsDeleteContextsCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes all active contexts in the specified session.", + // "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.runtimes.sessions.deleteContexts", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the session to delete all contexts from. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or `projects/\u003cProject\nID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`. Note: Runtimes are\nunder construction and will be available soon. 
If \u003cRuntime ID\u003e is not\nspecified we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/contexts", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.runtimes.sessions.detectIntent": + +type ProjectsAgentRuntimesSessionsDetectIntentCall struct { + s *Service + sessionid string + googleclouddialogflowv2detectintentrequest *GoogleCloudDialogflowV2DetectIntentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DetectIntent: Processes a natural language query and returns +// structured, actionable data +// as a result. This method is not idempotent, because it may cause +// contexts +// and session entity types to be updated, which in turn might +// affect +// results of future queries. +func (r *ProjectsAgentRuntimesSessionsService) DetectIntent(sessionid string, googleclouddialogflowv2detectintentrequest *GoogleCloudDialogflowV2DetectIntentRequest) *ProjectsAgentRuntimesSessionsDetectIntentCall { + c := &ProjectsAgentRuntimesSessionsDetectIntentCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sessionid = sessionid + c.googleclouddialogflowv2detectintentrequest = googleclouddialogflowv2detectintentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentRuntimesSessionsDetectIntentCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsDetectIntentCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentRuntimesSessionsDetectIntentCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsDetectIntentCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRuntimesSessionsDetectIntentCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRuntimesSessionsDetectIntentCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2detectintentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+session}:detectIntent") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.sessionid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.runtimes.sessions.detectIntent" call. +// Exactly one of *GoogleCloudDialogflowV2DetectIntentResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2DetectIntentResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *ProjectsAgentRuntimesSessionsDetectIntentCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2DetectIntentResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2DetectIntentResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Processes a natural language query and returns structured, actionable data\nas a result. This method is not idempotent, because it may cause contexts\nand session entity types to be updated, which in turn might affect\nresults of future queries.", + // "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}:detectIntent", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.runtimes.sessions.detectIntent", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The name of the session this query is sent to. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.\nIt's up to the API caller to choose an appropriate session ID. 
It can be\na random number or some type of user identifier (preferably hashed).\nThe length of the session ID must not exceed 36 bytes.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+session}:detectIntent", + // "request": { + // "$ref": "GoogleCloudDialogflowV2DetectIntentRequest" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2DetectIntentResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.runtimes.sessions.contexts.create": + +type ProjectsAgentRuntimesSessionsContextsCreateCall struct { + s *Service + parent string + googleclouddialogflowv2context *GoogleCloudDialogflowV2Context + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a context. +func (r *ProjectsAgentRuntimesSessionsContextsService) Create(parent string, googleclouddialogflowv2context *GoogleCloudDialogflowV2Context) *ProjectsAgentRuntimesSessionsContextsCreateCall { + c := &ProjectsAgentRuntimesSessionsContextsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2context = googleclouddialogflowv2context + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentRuntimesSessionsContextsCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsContextsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentRuntimesSessionsContextsCreateCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsContextsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRuntimesSessionsContextsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRuntimesSessionsContextsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2context) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/contexts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.runtimes.sessions.contexts.create" call. +// Exactly one of *GoogleCloudDialogflowV2Context or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Context.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentRuntimesSessionsContextsCreateCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Context, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Context{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a context.", + // "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.runtimes.sessions.contexts.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The session to create a context for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/contexts", + // "request": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.runtimes.sessions.contexts.delete": + +type ProjectsAgentRuntimesSessionsContextsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified context. +func (r *ProjectsAgentRuntimesSessionsContextsService) Delete(name string) *ProjectsAgentRuntimesSessionsContextsDeleteCall { + c := &ProjectsAgentRuntimesSessionsContextsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentRuntimesSessionsContextsDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsContextsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentRuntimesSessionsContextsDeleteCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsContextsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRuntimesSessionsContextsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRuntimesSessionsContextsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.runtimes.sessions.contexts.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentRuntimesSessionsContextsDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified context.", + // "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/contexts/{contextsId}", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.runtimes.sessions.contexts.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the context to delete. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. 
//   If <Runtime ID> is not specified, we assume default 'sandbox' runtime.",
//   "location": "path",
//   "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/contexts/[^/]+$",
//   "required": true, "type": "string",
//   "path": "v2/{+name}",
//   "response": { "$ref": "GoogleProtobufEmpty" },
//   "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ]
// }

}

// method id "dialogflow.projects.agent.runtimes.sessions.contexts.get":

// ProjectsAgentRuntimesSessionsContextsGetCall is the request builder for a
// contexts.get RPC: it accumulates the resource name, URL query parameters,
// optional ETag and request context before Do issues the HTTP call.
//
// NOTE(review): vendored, generator-produced API client code
// (google-api-go-client); change the generator/discovery doc, not this file.
type ProjectsAgentRuntimesSessionsContextsGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Retrieves the specified context.
func (r *ProjectsAgentRuntimesSessionsContextsService) Get(name string) *ProjectsAgentRuntimesSessionsContextsGetCall {
	c := &ProjectsAgentRuntimesSessionsContextsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAgentRuntimesSessionsContextsGetCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsContextsGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsAgentRuntimesSessionsContextsGetCall) IfNoneMatch(entityTag string) *ProjectsAgentRuntimesSessionsContextsGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAgentRuntimesSessionsContextsGetCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsContextsGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAgentRuntimesSessionsContextsGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the GET request for "v2/{+name}" (expanding the
// context resource name into the URL) and sends it through gensupport.
func (c *ProjectsAgentRuntimesSessionsContextsGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "dialogflow.projects.agent.runtimes.sessions.contexts.get" call.
// Exactly one of *GoogleCloudDialogflowV2Context or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *GoogleCloudDialogflowV2Context.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsAgentRuntimesSessionsContextsGetCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Context, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (If-None-Match hit) carries no decodable body; it is surfaced
	// as a *googleapi.Error so callers can detect it with IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GoogleCloudDialogflowV2Context{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery metadata (abridged):
	//   id: dialogflow.projects.agent.runtimes.sessions.contexts.get
	//   httpMethod: GET, path: v2/{+name}
	//   name pattern: ^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/contexts/[^/]+$
	//   response: GoogleCloudDialogflowV2Context
	//   scope: https://www.googleapis.com/auth/cloud-platform

}

// method id "dialogflow.projects.agent.runtimes.sessions.contexts.list":

// ProjectsAgentRuntimesSessionsContextsListCall is the request builder for a
// contexts.list RPC over all contexts in a session.
type ProjectsAgentRuntimesSessionsContextsListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Returns the list of all contexts in the specified session.
func (r *ProjectsAgentRuntimesSessionsContextsService) List(parent string) *ProjectsAgentRuntimesSessionsContextsListCall {
	c := &ProjectsAgentRuntimesSessionsContextsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of items to return in a single page. By
// default 100 and at most 1000.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) PageSize(pageSize int64) *ProjectsAgentRuntimesSessionsContextsListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": The
// next_page_token value returned from a previous list request.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) PageToken(pageToken string) *ProjectsAgentRuntimesSessionsContextsListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsContextsListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) IfNoneMatch(entityTag string) *ProjectsAgentRuntimesSessionsContextsListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsContextsListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the GET request for "v2/{+parent}/contexts"
// (expanding the session parent into the URL) and sends it.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/contexts")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "dialogflow.projects.agent.runtimes.sessions.contexts.list" call.
// Exactly one of *GoogleCloudDialogflowV2ListContextsResponse or error
// will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *GoogleCloudDialogflowV2ListContextsResponse.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2ListContextsResponse, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (If-None-Match hit) carries no decodable body; surface it as
	// a *googleapi.Error detectable via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GoogleCloudDialogflowV2ListContextsResponse{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery metadata (abridged):
	//   id: dialogflow.projects.agent.runtimes.sessions.contexts.list
	//   httpMethod: GET, path: v2/{+parent}/contexts
	//   query params: pageSize (int32), pageToken (string)
	//   parent pattern: ^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$
	//   response: GoogleCloudDialogflowV2ListContextsResponse
	//   scope: https://www.googleapis.com/auth/cloud-platform

}

// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsAgentRuntimesSessionsContextsListCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2ListContextsResponse) error) error {
	c.ctx_ = ctx
	// Restore the originally-requested page token when iteration ends,
	// so the call object can be reused from its starting point.
	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
	for {
		x, err := c.Do()
		if err != nil {
			return err
		}
		if err := f(x); err != nil {
			return err
		}
		if x.NextPageToken == "" {
			return nil
		}
		c.PageToken(x.NextPageToken)
	}
}

// method id "dialogflow.projects.agent.runtimes.sessions.contexts.patch":

// ProjectsAgentRuntimesSessionsContextsPatchCall is the request builder for a
// contexts.patch RPC: it carries the context resource name, the updated
// context body, and optional updateMask/fields query parameters.
type ProjectsAgentRuntimesSessionsContextsPatchCall struct {
	s                              *Service
	nameid                         string
	googleclouddialogflowv2context *GoogleCloudDialogflowV2Context
	urlParams_                     gensupport.URLParams
	ctx_                           context.Context
	header_                        http.Header
}

// Patch: Updates the specified context.
func (r *ProjectsAgentRuntimesSessionsContextsService) Patch(nameid string, googleclouddialogflowv2context *GoogleCloudDialogflowV2Context) *ProjectsAgentRuntimesSessionsContextsPatchCall {
	c := &ProjectsAgentRuntimesSessionsContextsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.nameid = nameid
	c.googleclouddialogflowv2context = googleclouddialogflowv2context
	return c
}

// UpdateMask sets the optional parameter "updateMask": The mask to
// control which fields get updated.
func (c *ProjectsAgentRuntimesSessionsContextsPatchCall) UpdateMask(updateMask string) *ProjectsAgentRuntimesSessionsContextsPatchCall {
	c.urlParams_.Set("updateMask", updateMask)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAgentRuntimesSessionsContextsPatchCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsContextsPatchCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAgentRuntimesSessionsContextsPatchCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsContextsPatchCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAgentRuntimesSessionsContextsPatchCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest JSON-encodes the context body and issues a PATCH to
// "v2/{+name}" (expanding the context resource name into the URL).
func (c *ProjectsAgentRuntimesSessionsContextsPatchCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2context)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("PATCH", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.nameid,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "dialogflow.projects.agent.runtimes.sessions.contexts.patch" call.
// Exactly one of *GoogleCloudDialogflowV2Context or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *GoogleCloudDialogflowV2Context.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsAgentRuntimesSessionsContextsPatchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Context, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 carries no decodable body; surface it as a *googleapi.Error
	// detectable via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GoogleCloudDialogflowV2Context{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery metadata (abridged):
	//   id: dialogflow.projects.agent.runtimes.sessions.contexts.patch
	//   httpMethod: PATCH, path: v2/{+name}
	//   query param: updateMask (google-fieldmask)
	//   name pattern: ^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/contexts/[^/]+$
	//   request/response: GoogleCloudDialogflowV2Context
	//   scope: https://www.googleapis.com/auth/cloud-platform

}

// method id "dialogflow.projects.agent.runtimes.sessions.entityTypes.create":

// ProjectsAgentRuntimesSessionsEntityTypesCreateCall is the request builder
// for an entityTypes.create RPC: session parent plus the session entity
// type body to create.
type ProjectsAgentRuntimesSessionsEntityTypesCreateCall struct {
	s                                        *Service
	parent                                   string
	googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType
	urlParams_                               gensupport.URLParams
	ctx_                                     context.Context
	header_                                  http.Header
}

// Create: Creates a session entity type.
func (r *ProjectsAgentRuntimesSessionsEntityTypesService) Create(parent string, googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType) *ProjectsAgentRuntimesSessionsEntityTypesCreateCall {
	c := &ProjectsAgentRuntimesSessionsEntityTypesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	c.googleclouddialogflowv2sessionentitytype = googleclouddialogflowv2sessionentitytype
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAgentRuntimesSessionsEntityTypesCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsEntityTypesCreateCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAgentRuntimesSessionsEntityTypesCreateCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsEntityTypesCreateCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAgentRuntimesSessionsEntityTypesCreateCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest JSON-encodes the session entity type body and issues a POST
// to "v2/{+parent}/entityTypes" (expanding the session parent).
func (c *ProjectsAgentRuntimesSessionsEntityTypesCreateCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2sessionentitytype)
	if err != nil {
		return nil, err
	}
	reqHeaders.Set("Content-Type", "application/json")
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("POST", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"parent": c.parent,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "dialogflow.projects.agent.runtimes.sessions.entityTypes.create" call.
// Exactly one of *GoogleCloudDialogflowV2SessionEntityType or error
// will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *GoogleCloudDialogflowV2SessionEntityType.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsAgentRuntimesSessionsEntityTypesCreateCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SessionEntityType, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 carries no decodable body; surface it as a *googleapi.Error
	// detectable via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GoogleCloudDialogflowV2SessionEntityType{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery metadata (abridged):
	//   id: dialogflow.projects.agent.runtimes.sessions.entityTypes.create
	//   httpMethod: POST, path: v2/{+parent}/entityTypes
	//   parent pattern: ^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$
	//   request/response: GoogleCloudDialogflowV2SessionEntityType
	//   scope: https://www.googleapis.com/auth/cloud-platform

}

// method id "dialogflow.projects.agent.runtimes.sessions.entityTypes.delete":

// ProjectsAgentRuntimesSessionsEntityTypesDeleteCall is the request builder
// for an entityTypes.delete RPC, keyed by the entity type resource name.
type ProjectsAgentRuntimesSessionsEntityTypesDeleteCall struct {
	s          *Service
	name       string
	urlParams_ gensupport.URLParams
	ctx_       context.Context
	header_    http.Header
}

// Delete: Deletes the specified session entity type.
func (r *ProjectsAgentRuntimesSessionsEntityTypesService) Delete(name string) *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall {
	c := &ProjectsAgentRuntimesSessionsEntityTypesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the DELETE request for "v2/{+name}" (expanding the
// entity type resource name into the URL) and sends it.
func (c *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "dialogflow.projects.agent.runtimes.sessions.entityTypes.delete" call.
// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsAgentRuntimesSessionsEntityTypesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 carries no decodable body; surface it as a *googleapi.Error
	// detectable via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GoogleProtobufEmpty{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery metadata (abridged):
	//   id: dialogflow.projects.agent.runtimes.sessions.entityTypes.delete
	//   httpMethod: DELETE, path: v2/{+name}
	//   name pattern: ^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/entityTypes/[^/]+$
	//   response: GoogleProtobufEmpty
	//   scope: https://www.googleapis.com/auth/cloud-platform

}

// method id "dialogflow.projects.agent.runtimes.sessions.entityTypes.get":

// ProjectsAgentRuntimesSessionsEntityTypesGetCall is the request builder for
// an entityTypes.get RPC, keyed by the session entity type resource name.
type ProjectsAgentRuntimesSessionsEntityTypesGetCall struct {
	s            *Service
	name         string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// Get: Retrieves the specified session entity type.
func (r *ProjectsAgentRuntimesSessionsEntityTypesService) Get(name string) *ProjectsAgentRuntimesSessionsEntityTypesGetCall {
	c := &ProjectsAgentRuntimesSessionsEntityTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.name = name
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAgentRuntimesSessionsEntityTypesGetCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsEntityTypesGetCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsAgentRuntimesSessionsEntityTypesGetCall) IfNoneMatch(entityTag string) *ProjectsAgentRuntimesSessionsEntityTypesGetCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method.
// Any pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAgentRuntimesSessionsEntityTypesGetCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsEntityTypesGetCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAgentRuntimesSessionsEntityTypesGetCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

// doRequest builds the GET request for "v2/{+name}" (expanding the
// entity type resource name into the URL) and sends it.
func (c *ProjectsAgentRuntimesSessionsEntityTypesGetCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
	urls += "?" + c.urlParams_.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"name": c.name,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "dialogflow.projects.agent.runtimes.sessions.entityTypes.get" call.
// Exactly one of *GoogleCloudDialogflowV2SessionEntityType or error
// will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *GoogleCloudDialogflowV2SessionEntityType.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsAgentRuntimesSessionsEntityTypesGetCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SessionEntityType, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	// A 304 (If-None-Match hit) carries no decodable body; surface it as
	// a *googleapi.Error detectable via googleapi.IsNotModified.
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &GoogleCloudDialogflowV2SessionEntityType{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// Discovery metadata (abridged):
	//   id: dialogflow.projects.agent.runtimes.sessions.entityTypes.get
	//   httpMethod: GET, path: v2/{+name}
	//   name pattern: ^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/entityTypes/[^/]+$
	//   response: GoogleCloudDialogflowV2SessionEntityType
	//   scope: https://www.googleapis.com/auth/cloud-platform

}

// method id "dialogflow.projects.agent.runtimes.sessions.entityTypes.list":

// ProjectsAgentRuntimesSessionsEntityTypesListCall is the request builder
// for an entityTypes.list RPC over all session entity types in a session.
type ProjectsAgentRuntimesSessionsEntityTypesListCall struct {
	s            *Service
	parent       string
	urlParams_   gensupport.URLParams
	ifNoneMatch_ string
	ctx_         context.Context
	header_      http.Header
}

// List: Returns the list of all session entity types in the specified
// session.
func (r *ProjectsAgentRuntimesSessionsEntityTypesService) List(parent string) *ProjectsAgentRuntimesSessionsEntityTypesListCall {
	c := &ProjectsAgentRuntimesSessionsEntityTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
	c.parent = parent
	return c
}

// PageSize sets the optional parameter "pageSize": The maximum number
// of items to return in a single page. By
// default 100 and at most 1000.
func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) PageSize(pageSize int64) *ProjectsAgentRuntimesSessionsEntityTypesListCall {
	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
	return c
}

// PageToken sets the optional parameter "pageToken": The
// next_page_token value returned from a previous list request.
func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) PageToken(pageToken string) *ProjectsAgentRuntimesSessionsEntityTypesListCall {
	c.urlParams_.Set("pageToken", pageToken)
	return c
}

// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
+func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsEntityTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) IfNoneMatch(entityTag string) *ProjectsAgentRuntimesSessionsEntityTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsEntityTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.runtimes.sessions.entityTypes.list" call. +// Exactly one of *GoogleCloudDialogflowV2ListSessionEntityTypesResponse +// or error will be non-nil. Any non-2xx status code is an error. +// Response headers are in either +// *GoogleCloudDialogflowV2ListSessionEntityTypesResponse.ServerResponse. +// Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2ListSessionEntityTypesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2ListSessionEntityTypesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of all session entity types in the specified session.", + // "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.list", + // "parameterOrder": [ + 
// "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. The next_page_token value returned from a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The session to list all session entity types from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes", + // "response": { + // "$ref": "GoogleCloudDialogflowV2ListSessionEntityTypesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsAgentRuntimesSessionsEntityTypesListCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2ListSessionEntityTypesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dialogflow.projects.agent.runtimes.sessions.entityTypes.patch": + +type ProjectsAgentRuntimesSessionsEntityTypesPatchCall struct { + s *Service + nameid string + googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified session entity type. +func (r *ProjectsAgentRuntimesSessionsEntityTypesService) Patch(nameid string, googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType) *ProjectsAgentRuntimesSessionsEntityTypesPatchCall { + c := &ProjectsAgentRuntimesSessionsEntityTypesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.googleclouddialogflowv2sessionentitytype = googleclouddialogflowv2sessionentitytype + return c +} + +// UpdateMask sets the optional parameter "updateMask": The mask to +// control which fields get updated. +func (c *ProjectsAgentRuntimesSessionsEntityTypesPatchCall) UpdateMask(updateMask string) *ProjectsAgentRuntimesSessionsEntityTypesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentRuntimesSessionsEntityTypesPatchCall) Fields(s ...googleapi.Field) *ProjectsAgentRuntimesSessionsEntityTypesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentRuntimesSessionsEntityTypesPatchCall) Context(ctx context.Context) *ProjectsAgentRuntimesSessionsEntityTypesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentRuntimesSessionsEntityTypesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentRuntimesSessionsEntityTypesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2sessionentitytype) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.runtimes.sessions.entityTypes.patch" call. +// Exactly one of *GoogleCloudDialogflowV2SessionEntityType or error +// will be non-nil. Any non-2xx status code is an error. 
Response +// headers are in either +// *GoogleCloudDialogflowV2SessionEntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentRuntimesSessionsEntityTypesPatchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SessionEntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2SessionEntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified session entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/runtimes/{runtimesId}/sessions/{sessionsId}/entityTypes/{entityTypesId}", + // "httpMethod": "PATCH", + // "id": "dialogflow.projects.agent.runtimes.sessions.entityTypes.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The unique identifier of this session entity type. 
Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003esessions/\u003cSession\nID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/runtimes/[^/]+/sessions/[^/]+/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. The mask to control which fields get updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.deleteContexts": + +type ProjectsAgentSessionsDeleteContextsCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DeleteContexts: Deletes all active contexts in the specified session. +func (r *ProjectsAgentSessionsService) DeleteContexts(parent string) *ProjectsAgentSessionsDeleteContextsCall { + c := &ProjectsAgentSessionsDeleteContextsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentSessionsDeleteContextsCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsDeleteContextsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSessionsDeleteContextsCall) Context(ctx context.Context) *ProjectsAgentSessionsDeleteContextsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsDeleteContextsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsDeleteContextsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/contexts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.deleteContexts" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsAgentSessionsDeleteContextsCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes all active contexts in the specified session.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.sessions.deleteContexts", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The name of the session to delete all contexts from. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or `projects/\u003cProject\nID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`. Note: Runtimes are\nunder construction and will be available soon. 
If \u003cRuntime ID\u003e is not\nspecified we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/contexts", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.detectIntent": + +type ProjectsAgentSessionsDetectIntentCall struct { + s *Service + sessionid string + googleclouddialogflowv2detectintentrequest *GoogleCloudDialogflowV2DetectIntentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DetectIntent: Processes a natural language query and returns +// structured, actionable data +// as a result. This method is not idempotent, because it may cause +// contexts +// and session entity types to be updated, which in turn might +// affect +// results of future queries. +func (r *ProjectsAgentSessionsService) DetectIntent(sessionid string, googleclouddialogflowv2detectintentrequest *GoogleCloudDialogflowV2DetectIntentRequest) *ProjectsAgentSessionsDetectIntentCall { + c := &ProjectsAgentSessionsDetectIntentCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sessionid = sessionid + c.googleclouddialogflowv2detectintentrequest = googleclouddialogflowv2detectintentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsDetectIntentCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsDetectIntentCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentSessionsDetectIntentCall) Context(ctx context.Context) *ProjectsAgentSessionsDetectIntentCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsDetectIntentCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsDetectIntentCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2detectintentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+session}:detectIntent") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "session": c.sessionid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.detectIntent" call. +// Exactly one of *GoogleCloudDialogflowV2DetectIntentResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2DetectIntentResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentSessionsDetectIntentCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2DetectIntentResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2DetectIntentResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Processes a natural language query and returns structured, actionable data\nas a result. This method is not idempotent, because it may cause contexts\nand session entity types to be updated, which in turn might affect\nresults of future queries.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}:detectIntent", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.sessions.detectIntent", + // "parameterOrder": [ + // "session" + // ], + // "parameters": { + // "session": { + // "description": "Required. The name of the session this query is sent to. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.\nIt's up to the API caller to choose an appropriate session ID. 
It can be\na random number or some type of user identifier (preferably hashed).\nThe length of the session ID must not exceed 36 bytes.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+session}:detectIntent", + // "request": { + // "$ref": "GoogleCloudDialogflowV2DetectIntentRequest" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2DetectIntentResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.contexts.create": + +type ProjectsAgentSessionsContextsCreateCall struct { + s *Service + parent string + googleclouddialogflowv2context *GoogleCloudDialogflowV2Context + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a context. +func (r *ProjectsAgentSessionsContextsService) Create(parent string, googleclouddialogflowv2context *GoogleCloudDialogflowV2Context) *ProjectsAgentSessionsContextsCreateCall { + c := &ProjectsAgentSessionsContextsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2context = googleclouddialogflowv2context + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsContextsCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsContextsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentSessionsContextsCreateCall) Context(ctx context.Context) *ProjectsAgentSessionsContextsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsContextsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsContextsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2context) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/contexts") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.contexts.create" call. +// Exactly one of *GoogleCloudDialogflowV2Context or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Context.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentSessionsContextsCreateCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Context, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Context{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a context.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.sessions.contexts.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. The session to create a context for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/contexts", + // "request": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.contexts.delete": + +type ProjectsAgentSessionsContextsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + 
header_ http.Header +} + +// Delete: Deletes the specified context. +func (r *ProjectsAgentSessionsContextsService) Delete(name string) *ProjectsAgentSessionsContextsDeleteCall { + c := &ProjectsAgentSessionsContextsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsContextsDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsContextsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSessionsContextsDeleteCall) Context(ctx context.Context) *ProjectsAgentSessionsContextsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsContextsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsContextsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.contexts.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. 
Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentSessionsContextsDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified context.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts/{contextsId}", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.sessions.contexts.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the context to delete. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. 
If \u003cRuntime ID\u003e is not specified, we assume default\n'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+/contexts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.contexts.get": + +type ProjectsAgentSessionsContextsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified context. +func (r *ProjectsAgentSessionsContextsService) Get(name string) *ProjectsAgentSessionsContextsGetCall { + c := &ProjectsAgentSessionsContextsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsContextsGetCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsContextsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentSessionsContextsGetCall) IfNoneMatch(entityTag string) *ProjectsAgentSessionsContextsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentSessionsContextsGetCall) Context(ctx context.Context) *ProjectsAgentSessionsContextsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsContextsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsContextsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.contexts.get" call. +// Exactly one of *GoogleCloudDialogflowV2Context or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Context.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentSessionsContextsGetCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Context, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Context{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified context.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts/{contextsId}", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.sessions.contexts.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the context. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`\nor `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`. Note: Runtimes are under construction and will\nbe available soon. 
If \u003cRuntime ID\u003e is not specified, we assume default\n'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+/contexts/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.contexts.list": + +type ProjectsAgentSessionsContextsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns the list of all contexts in the specified session. +func (r *ProjectsAgentSessionsContextsService) List(parent string) *ProjectsAgentSessionsContextsListCall { + c := &ProjectsAgentSessionsContextsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return in a single page. By +// default 100 and at most 1000. +func (c *ProjectsAgentSessionsContextsListCall) PageSize(pageSize int64) *ProjectsAgentSessionsContextsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous list request. +func (c *ProjectsAgentSessionsContextsListCall) PageToken(pageToken string) *ProjectsAgentSessionsContextsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentSessionsContextsListCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsContextsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentSessionsContextsListCall) IfNoneMatch(entityTag string) *ProjectsAgentSessionsContextsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSessionsContextsListCall) Context(ctx context.Context) *ProjectsAgentSessionsContextsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsContextsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsContextsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/contexts") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.contexts.list" call. +// Exactly one of *GoogleCloudDialogflowV2ListContextsResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2ListContextsResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentSessionsContextsListCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2ListContextsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2ListContextsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of all contexts in the specified session.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.sessions.contexts.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional. 
The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. The next_page_token value returned from a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The session to list all contexts from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/contexts", + // "response": { + // "$ref": "GoogleCloudDialogflowV2ListContextsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsAgentSessionsContextsListCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2ListContextsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dialogflow.projects.agent.sessions.contexts.patch": + +type ProjectsAgentSessionsContextsPatchCall struct { + s *Service + nameid string + googleclouddialogflowv2context *GoogleCloudDialogflowV2Context + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified context. +func (r *ProjectsAgentSessionsContextsService) Patch(nameid string, googleclouddialogflowv2context *GoogleCloudDialogflowV2Context) *ProjectsAgentSessionsContextsPatchCall { + c := &ProjectsAgentSessionsContextsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.googleclouddialogflowv2context = googleclouddialogflowv2context + return c +} + +// UpdateMask sets the optional parameter "updateMask": The mask to +// control which fields get updated. +func (c *ProjectsAgentSessionsContextsPatchCall) UpdateMask(updateMask string) *ProjectsAgentSessionsContextsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsContextsPatchCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsContextsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentSessionsContextsPatchCall) Context(ctx context.Context) *ProjectsAgentSessionsContextsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsContextsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsContextsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2context) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.contexts.patch" call. +// Exactly one of *GoogleCloudDialogflowV2Context or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GoogleCloudDialogflowV2Context.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentSessionsContextsPatchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2Context, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2Context{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified context.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/contexts/{contextsId}", + // "httpMethod": "PATCH", + // "id": "dialogflow.projects.agent.sessions.contexts.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The unique identifier of the context. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/contexts/\u003cContext ID\u003e`,\nor\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession\nID\u003e/contexts/\u003cContext ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nThe Context ID is always converted to lowercase.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+/contexts/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. 
The mask to control which fields get updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2Context" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.entityTypes.create": + +type ProjectsAgentSessionsEntityTypesCreateCall struct { + s *Service + parent string + googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a session entity type. +func (r *ProjectsAgentSessionsEntityTypesService) Create(parent string, googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType) *ProjectsAgentSessionsEntityTypesCreateCall { + c := &ProjectsAgentSessionsEntityTypesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleclouddialogflowv2sessionentitytype = googleclouddialogflowv2sessionentitytype + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsEntityTypesCreateCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsEntityTypesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSessionsEntityTypesCreateCall) Context(ctx context.Context) *ProjectsAgentSessionsEntityTypesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsAgentSessionsEntityTypesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsEntityTypesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2sessionentitytype) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.entityTypes.create" call. +// Exactly one of *GoogleCloudDialogflowV2SessionEntityType or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2SessionEntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentSessionsEntityTypesCreateCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SessionEntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2SessionEntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a session entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes", + // "httpMethod": "POST", + // "id": "dialogflow.projects.agent.sessions.entityTypes.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "Required. 
The session to create a session entity type for.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes", + // "request": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.entityTypes.delete": + +type ProjectsAgentSessionsEntityTypesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified session entity type. +func (r *ProjectsAgentSessionsEntityTypesService) Delete(name string) *ProjectsAgentSessionsEntityTypesDeleteCall { + c := &ProjectsAgentSessionsEntityTypesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsEntityTypesDeleteCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsEntityTypesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentSessionsEntityTypesDeleteCall) Context(ctx context.Context) *ProjectsAgentSessionsEntityTypesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsEntityTypesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsEntityTypesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.entityTypes.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsAgentSessionsEntityTypesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified session entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes/{entityTypesId}", + // "httpMethod": "DELETE", + // "id": "dialogflow.projects.agent.sessions.entityTypes.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the entity type to delete. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e` or `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime\nID\u003e/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`. Note:\nRuntimes are under construction and will be available soon. 
If \u003cRuntime ID\u003e\nis not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.entityTypes.get": + +type ProjectsAgentSessionsEntityTypesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves the specified session entity type. +func (r *ProjectsAgentSessionsEntityTypesService) Get(name string) *ProjectsAgentSessionsEntityTypesGetCall { + c := &ProjectsAgentSessionsEntityTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsEntityTypesGetCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsEntityTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentSessionsEntityTypesGetCall) IfNoneMatch(entityTag string) *ProjectsAgentSessionsEntityTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsAgentSessionsEntityTypesGetCall) Context(ctx context.Context) *ProjectsAgentSessionsEntityTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsEntityTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsEntityTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.entityTypes.get" call. +// Exactly one of *GoogleCloudDialogflowV2SessionEntityType or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2SessionEntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentSessionsEntityTypesGetCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SessionEntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2SessionEntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the specified session entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes/{entityTypesId}", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.sessions.entityTypes.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The name of the session entity type. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e` or `projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime\nID\u003e/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`. Note:\nRuntimes are under construction and will be available soon. 
If \u003cRuntime ID\u003e\nis not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.agent.sessions.entityTypes.list": + +type ProjectsAgentSessionsEntityTypesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns the list of all session entity types in the specified +// session. +func (r *ProjectsAgentSessionsEntityTypesService) List(parent string) *ProjectsAgentSessionsEntityTypesListCall { + c := &ProjectsAgentSessionsEntityTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return in a single page. By +// default 100 and at most 1000. +func (c *ProjectsAgentSessionsEntityTypesListCall) PageSize(pageSize int64) *ProjectsAgentSessionsEntityTypesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous list request. +func (c *ProjectsAgentSessionsEntityTypesListCall) PageToken(pageToken string) *ProjectsAgentSessionsEntityTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsAgentSessionsEntityTypesListCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsEntityTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsAgentSessionsEntityTypesListCall) IfNoneMatch(entityTag string) *ProjectsAgentSessionsEntityTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSessionsEntityTypesListCall) Context(ctx context.Context) *ProjectsAgentSessionsEntityTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsEntityTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsEntityTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/entityTypes") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.entityTypes.list" call. +// Exactly one of *GoogleCloudDialogflowV2ListSessionEntityTypesResponse +// or error will be non-nil. Any non-2xx status code is an error. +// Response headers are in either +// *GoogleCloudDialogflowV2ListSessionEntityTypesResponse.ServerResponse. +// Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsAgentSessionsEntityTypesListCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2ListSessionEntityTypesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2ListSessionEntityTypesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the list of all session entity types in the specified session.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes", + // "httpMethod": "GET", + // "id": "dialogflow.projects.agent.sessions.entityTypes.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // 
"pageSize": { + // "description": "Optional. The maximum number of items to return in a single page. By\ndefault 100 and at most 1000.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. The next_page_token value returned from a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "Required. The session to list all session entity types from.\nFormat: `projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e` or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003e/sessions/\u003cSession ID\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/entityTypes", + // "response": { + // "$ref": "GoogleCloudDialogflowV2ListSessionEntityTypesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsAgentSessionsEntityTypesListCall) Pages(ctx context.Context, f func(*GoogleCloudDialogflowV2ListSessionEntityTypesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dialogflow.projects.agent.sessions.entityTypes.patch": + +type ProjectsAgentSessionsEntityTypesPatchCall struct { + s *Service + nameid string + googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the specified session entity type. +func (r *ProjectsAgentSessionsEntityTypesService) Patch(nameid string, googleclouddialogflowv2sessionentitytype *GoogleCloudDialogflowV2SessionEntityType) *ProjectsAgentSessionsEntityTypesPatchCall { + c := &ProjectsAgentSessionsEntityTypesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.nameid = nameid + c.googleclouddialogflowv2sessionentitytype = googleclouddialogflowv2sessionentitytype + return c +} + +// UpdateMask sets the optional parameter "updateMask": The mask to +// control which fields get updated. +func (c *ProjectsAgentSessionsEntityTypesPatchCall) UpdateMask(updateMask string) *ProjectsAgentSessionsEntityTypesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsAgentSessionsEntityTypesPatchCall) Fields(s ...googleapi.Field) *ProjectsAgentSessionsEntityTypesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsAgentSessionsEntityTypesPatchCall) Context(ctx context.Context) *ProjectsAgentSessionsEntityTypesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsAgentSessionsEntityTypesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsAgentSessionsEntityTypesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleclouddialogflowv2sessionentitytype) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.nameid, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.agent.sessions.entityTypes.patch" call. +// Exactly one of *GoogleCloudDialogflowV2SessionEntityType or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GoogleCloudDialogflowV2SessionEntityType.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *ProjectsAgentSessionsEntityTypesPatchCall) Do(opts ...googleapi.CallOption) (*GoogleCloudDialogflowV2SessionEntityType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleCloudDialogflowV2SessionEntityType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified session entity type.", + // "flatPath": "v2/projects/{projectsId}/agent/sessions/{sessionsId}/entityTypes/{entityTypesId}", + // "httpMethod": "PATCH", + // "id": "dialogflow.projects.agent.sessions.entityTypes.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Required. The unique identifier of this session entity type. Format:\n`projects/\u003cProject ID\u003e/agent/sessions/\u003cSession ID\u003e/entityTypes/\u003cEntity Type\nDisplay Name\u003e`, or\n`projects/\u003cProject ID\u003e/agent/runtimes/\u003cRuntime ID\u003esessions/\u003cSession\nID\u003e/entityTypes/\u003cEntity Type Display Name\u003e`.\nNote: Runtimes are under construction and will be available soon.\nIf \u003cRuntime ID\u003e is not specified, we assume default 'sandbox' runtime.", + // "location": "path", + // "pattern": "^projects/[^/]+/agent/sessions/[^/]+/entityTypes/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Optional. 
The mask to control which fields get updated.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "response": { + // "$ref": "GoogleCloudDialogflowV2SessionEntityType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dialogflow.projects.operations.get": + +type ProjectsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this +// method to poll the operation result at intervals as recommended by +// the API +// service. +func (r *ProjectsOperationsService) Get(name string) *ProjectsOperationsGetCall { + c := &ProjectsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsOperationsGetCall) IfNoneMatch(entityTag string) *ProjectsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsOperationsGetCall) Context(ctx context.Context) *ProjectsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dialogflow.projects.operations.get" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsOperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "flatPath": "v2/projects/{projectsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "dialogflow.projects.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/dlp/v2/dlp-api.json b/vendor/google.golang.org/api/dlp/v2/dlp-api.json new file mode 100644 index 000000000..f102bb3af --- /dev/null +++ b/vendor/google.golang.org/api/dlp/v2/dlp-api.json @@ -0,0 +1,5083 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + } + } + } + }, + "basePath": "", + "baseUrl": "https://dlp.googleapis.com/", + "batchPath": "batch", + "canonicalName": 
"DLP", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", + "discoveryVersion": "v1", + "documentationLink": "https://cloud.google.com/dlp/docs/", + "fullyEncodeReservedExpansion": true, + "icons": { + "x16": "http://www.google.com/images/icons/product/search-16.gif", + "x32": "http://www.google.com/images/icons/product/search-32.gif" + }, + "id": "dlp:v2", + "kind": "discovery#restDescription", + "name": "dlp", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "$.xgafv": { + "description": "V1 error format.", + "enum": [ + "1", + "2" + ], + "enumDescriptions": [ + "v1 error format", + "v2 error format" + ], + "location": "query", + "type": "string" + }, + "access_token": { + "description": "OAuth access token.", + "location": "query", + "type": "string" + }, + "alt": { + "default": "json", + "description": "Data format for response.", + "enum": [ + "json", + "media", + "proto" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json", + "Media download with context-dependent Content-Type", + "Responses with Content-Type of application/x-protobuf" + ], + "location": "query", + "type": "string" + }, + "bearer_token": { + "description": "OAuth bearer token.", + "location": "query", + "type": "string" + }, + "callback": { + "description": "JSONP", + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "pp": { + "default": "true", + "description": "Pretty-print response.", + "location": "query", + "type": "boolean" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "uploadType": { + "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", + "location": "query", + "type": "string" + }, + "upload_protocol": { + "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "infoTypes": { + "methods": { + "list": { + "description": "Returns sensitive information types DLP supports.", + "flatPath": "v2/infoTypes", + "httpMethod": "GET", + "id": "dlp.infoTypes.list", + "parameterOrder": [], + "parameters": { + "filter": { + "description": "Optional filter to only return infoTypes supported by certain parts of the\nAPI. Defaults to supported_by=INSPECT.", + "location": "query", + "type": "string" + }, + "languageCode": { + "description": "Optional BCP-47 language code for localized infoType friendly\nnames. 
If omitted, or if localized strings are not available,\nen-US strings will be returned.", + "location": "query", + "type": "string" + } + }, + "path": "v2/infoTypes", + "response": { + "$ref": "GooglePrivacyDlpV2ListInfoTypesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "organizations": { + "resources": { + "deidentifyTemplates": { + "methods": { + "create": { + "description": "Creates a de-identify template for re-using frequently used configuration\nfor Deidentifying content, images, and storage.", + "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates", + "httpMethod": "POST", + "id": "dlp.organizations.deidentifyTemplates.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/deidentifyTemplates", + "request": { + "$ref": "GooglePrivacyDlpV2CreateDeidentifyTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a de-identify template.", + "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates/{deidentifyTemplatesId}", + "httpMethod": "DELETE", + "id": "dlp.organizations.deidentifyTemplates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and deidentify template to be deleted,\nfor example `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + "location": "path", + "pattern": "^organizations/[^/]+/deidentifyTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": 
"GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets a de-identify template.", + "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates/{deidentifyTemplatesId}", + "httpMethod": "GET", + "id": "dlp.organizations.deidentifyTemplates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and deidentify template to be read, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + "location": "path", + "pattern": "^organizations/[^/]+/deidentifyTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists de-identify templates.", + "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates", + "httpMethod": "GET", + "id": "dlp.organizations.deidentifyTemplates.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional page token to continue retrieval. 
Comes from previous call\nto `ListDeidentifyTemplates`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/deidentifyTemplates", + "response": { + "$ref": "GooglePrivacyDlpV2ListDeidentifyTemplatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the de-identify template.", + "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates/{deidentifyTemplatesId}", + "httpMethod": "PATCH", + "id": "dlp.organizations.deidentifyTemplates.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of organization and deidentify template to be updated, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + "location": "path", + "pattern": "^organizations/[^/]+/deidentifyTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "inspectTemplates": { + "methods": { + "create": { + "description": "Creates an inspect template for re-using frequently used configuration\nfor inspecting content, images, and storage.", + "flatPath": "v2/organizations/{organizationsId}/inspectTemplates", + "httpMethod": "POST", + "id": "dlp.organizations.inspectTemplates.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + 
"pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/inspectTemplates", + "request": { + "$ref": "GooglePrivacyDlpV2CreateInspectTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2InspectTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes an inspect template.", + "flatPath": "v2/organizations/{organizationsId}/inspectTemplates/{inspectTemplatesId}", + "httpMethod": "DELETE", + "id": "dlp.organizations.inspectTemplates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and inspectTemplate to be deleted, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + "location": "path", + "pattern": "^organizations/[^/]+/inspectTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets an inspect template.", + "flatPath": "v2/organizations/{organizationsId}/inspectTemplates/{inspectTemplatesId}", + "httpMethod": "GET", + "id": "dlp.organizations.inspectTemplates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and inspectTemplate to be read, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + "location": "path", + "pattern": "^organizations/[^/]+/inspectTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GooglePrivacyDlpV2InspectTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists inspect templates.", + "flatPath": 
"v2/organizations/{organizationsId}/inspectTemplates", + "httpMethod": "GET", + "id": "dlp.organizations.inspectTemplates.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional page token to continue retrieval. Comes from previous call\nto `ListInspectTemplates`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^organizations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/inspectTemplates", + "response": { + "$ref": "GooglePrivacyDlpV2ListInspectTemplatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the inspect template.", + "flatPath": "v2/organizations/{organizationsId}/inspectTemplates/{inspectTemplatesId}", + "httpMethod": "PATCH", + "id": "dlp.organizations.inspectTemplates.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of organization and inspectTemplate to be updated, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + "location": "path", + "pattern": "^organizations/[^/]+/inspectTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GooglePrivacyDlpV2UpdateInspectTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2InspectTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + }, + "projects": { + "resources": { + "content": { + "methods": { + "deidentify": { + "description": 
"De-identifies potentially sensitive info from a ContentItem.\nThis method has limits on input size and output size.\n[How-to guide](/dlp/docs/deidentify-sensitive-data)", + "flatPath": "v2/projects/{projectsId}/content:deidentify", + "httpMethod": "POST", + "id": "dlp.projects.content.deidentify", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/content:deidentify", + "request": { + "$ref": "GooglePrivacyDlpV2DeidentifyContentRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyContentResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "inspect": { + "description": "Finds potentially sensitive info in content.\nThis method has limits on input size, processing time, and output size.\n[How-to guide for text](/dlp/docs/inspecting-text), [How-to guide for\nimages](/dlp/docs/inspecting-images)", + "flatPath": "v2/projects/{projectsId}/content:inspect", + "httpMethod": "POST", + "id": "dlp.projects.content.inspect", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/content:inspect", + "request": { + "$ref": "GooglePrivacyDlpV2InspectContentRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2InspectContentResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "reidentify": { + "description": "Re-identifies content that has been de-identified.", + "flatPath": "v2/projects/{projectsId}/content:reidentify", + "httpMethod": "POST", + "id": "dlp.projects.content.reidentify", + "parameterOrder": [ + "parent" + ], + "parameters": { 
+ "parent": { + "description": "The parent resource name.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/content:reidentify", + "request": { + "$ref": "GooglePrivacyDlpV2ReidentifyContentRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2ReidentifyContentResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "deidentifyTemplates": { + "methods": { + "create": { + "description": "Creates a de-identify template for re-using frequently used configuration\nfor Deidentifying content, images, and storage.", + "flatPath": "v2/projects/{projectsId}/deidentifyTemplates", + "httpMethod": "POST", + "id": "dlp.projects.deidentifyTemplates.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/deidentifyTemplates", + "request": { + "$ref": "GooglePrivacyDlpV2CreateDeidentifyTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a de-identify template.", + "flatPath": "v2/projects/{projectsId}/deidentifyTemplates/{deidentifyTemplatesId}", + "httpMethod": "DELETE", + "id": "dlp.projects.deidentifyTemplates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and deidentify template to be deleted,\nfor example `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + "location": "path", + "pattern": "^projects/[^/]+/deidentifyTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + 
"response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets a de-identify template.", + "flatPath": "v2/projects/{projectsId}/deidentifyTemplates/{deidentifyTemplatesId}", + "httpMethod": "GET", + "id": "dlp.projects.deidentifyTemplates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and deidentify template to be read, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + "location": "path", + "pattern": "^projects/[^/]+/deidentifyTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists de-identify templates.", + "flatPath": "v2/projects/{projectsId}/deidentifyTemplates", + "httpMethod": "GET", + "id": "dlp.projects.deidentifyTemplates.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional page token to continue retrieval. 
Comes from previous call\nto `ListDeidentifyTemplates`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/deidentifyTemplates", + "response": { + "$ref": "GooglePrivacyDlpV2ListDeidentifyTemplatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the de-identify template.", + "flatPath": "v2/projects/{projectsId}/deidentifyTemplates/{deidentifyTemplatesId}", + "httpMethod": "PATCH", + "id": "dlp.projects.deidentifyTemplates.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of organization and deidentify template to be updated, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + "location": "path", + "pattern": "^projects/[^/]+/deidentifyTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "dlpJobs": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running DlpJob. 
The server\nmakes a best effort to cancel the DlpJob, but success is not\nguaranteed.", + "flatPath": "v2/projects/{projectsId}/dlpJobs/{dlpJobsId}:cancel", + "httpMethod": "POST", + "id": "dlp.projects.dlpJobs.cancel", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the DlpJob resource to be cancelled.", + "location": "path", + "pattern": "^projects/[^/]+/dlpJobs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}:cancel", + "request": { + "$ref": "GooglePrivacyDlpV2CancelDlpJobRequest" + }, + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "create": { + "description": "Creates a new job to inspect storage or calculate risk metrics [How-to\nguide](/dlp/docs/compute-risk-analysis).", + "flatPath": "v2/projects/{projectsId}/dlpJobs", + "httpMethod": "POST", + "id": "dlp.projects.dlpJobs.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/dlpJobs", + "request": { + "$ref": "GooglePrivacyDlpV2CreateDlpJobRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2DlpJob" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a long-running DlpJob. This method indicates that the client is\nno longer interested in the DlpJob result. 
The job will be cancelled if\npossible.", + "flatPath": "v2/projects/{projectsId}/dlpJobs/{dlpJobsId}", + "httpMethod": "DELETE", + "id": "dlp.projects.dlpJobs.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the DlpJob resource to be deleted.", + "location": "path", + "pattern": "^projects/[^/]+/dlpJobs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets the latest state of a long-running DlpJob.", + "flatPath": "v2/projects/{projectsId}/dlpJobs/{dlpJobsId}", + "httpMethod": "GET", + "id": "dlp.projects.dlpJobs.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the DlpJob resource.", + "location": "path", + "pattern": "^projects/[^/]+/dlpJobs/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GooglePrivacyDlpV2DlpJob" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists DlpJobs that match the specified filter in the request.", + "flatPath": "v2/projects/{projectsId}/dlpJobs", + "httpMethod": "GET", + "id": "dlp.projects.dlpJobs.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "Optional. Allows filtering.\n\nSupported syntax:\n\n* Filter expressions are made up of one or more restrictions.\n* Restrictions can be combined by `AND` or `OR` logical operators. 
A\nsequence of restrictions implicitly uses `AND`.\n* A restriction has the form of `\u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e`.\n* Supported fields/values for inspect jobs:\n - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED\n - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY\n - `trigger_name` - The resource name of the trigger that created job.\n* Supported fields for risk analysis jobs:\n - `state` - RUNNING|CANCELED|FINISHED|FAILED\n* The operator must be `=` or `!=`.\n\nExamples:\n\n* inspected_storage = cloud_storage AND state = done\n* inspected_storage = cloud_storage OR inspected_storage = bigquery\n* inspected_storage = cloud_storage AND (state = done OR state = canceled)\n\nThe length of this field should be no more than 500 characters.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The standard list page size.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The standard list page token.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name, for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + }, + "type": { + "description": "The type of job. 
Defaults to `DlpJobType.INSPECT`", + "enum": [ + "DLP_JOB_TYPE_UNSPECIFIED", + "INSPECT_JOB", + "RISK_ANALYSIS_JOB" + ], + "location": "query", + "type": "string" + } + }, + "path": "v2/{+parent}/dlpJobs", + "response": { + "$ref": "GooglePrivacyDlpV2ListDlpJobsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "image": { + "methods": { + "redact": { + "description": "Redacts potentially sensitive info from an image.\nThis method has limits on input size, processing time, and output size.\n[How-to guide](/dlp/docs/redacting-sensitive-data-images)", + "flatPath": "v2/projects/{projectsId}/image:redact", + "httpMethod": "POST", + "id": "dlp.projects.image.redact", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/image:redact", + "request": { + "$ref": "GooglePrivacyDlpV2RedactImageRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2RedactImageResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "inspectTemplates": { + "methods": { + "create": { + "description": "Creates an inspect template for re-using frequently used configuration\nfor inspecting content, images, and storage.", + "flatPath": "v2/projects/{projectsId}/inspectTemplates", + "httpMethod": "POST", + "id": "dlp.projects.inspectTemplates.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/inspectTemplates", + "request": { + "$ref": "GooglePrivacyDlpV2CreateInspectTemplateRequest" + }, + "response": { + "$ref": 
"GooglePrivacyDlpV2InspectTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes an inspect template.", + "flatPath": "v2/projects/{projectsId}/inspectTemplates/{inspectTemplatesId}", + "httpMethod": "DELETE", + "id": "dlp.projects.inspectTemplates.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and inspectTemplate to be deleted, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + "location": "path", + "pattern": "^projects/[^/]+/inspectTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets an inspect template.", + "flatPath": "v2/projects/{projectsId}/inspectTemplates/{inspectTemplatesId}", + "httpMethod": "GET", + "id": "dlp.projects.inspectTemplates.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the organization and inspectTemplate to be read, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + "location": "path", + "pattern": "^projects/[^/]+/inspectTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GooglePrivacyDlpV2InspectTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists inspect templates.", + "flatPath": "v2/projects/{projectsId}/inspectTemplates", + "httpMethod": "GET", + "id": "dlp.projects.inspectTemplates.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "pageSize": { + "description": "Optional size of the page, can be limited by server. 
If zero server returns\na page of max size 100.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional page token to continue retrieval. Comes from previous call\nto `ListInspectTemplates`.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/inspectTemplates", + "response": { + "$ref": "GooglePrivacyDlpV2ListInspectTemplatesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates the inspect template.", + "flatPath": "v2/projects/{projectsId}/inspectTemplates/{inspectTemplatesId}", + "httpMethod": "PATCH", + "id": "dlp.projects.inspectTemplates.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of organization and inspectTemplate to be updated, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + "location": "path", + "pattern": "^projects/[^/]+/inspectTemplates/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": "GooglePrivacyDlpV2UpdateInspectTemplateRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2InspectTemplate" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, + "jobTriggers": { + "methods": { + "create": { + "description": "Creates a job trigger to run DLP actions such as scanning storage for\nsensitive information on a set schedule.", + "flatPath": "v2/projects/{projectsId}/jobTriggers", + "httpMethod": "POST", + "id": "dlp.projects.jobTriggers.create", + "parameterOrder": [ + "parent" + ], + "parameters": { + "parent": { + "description": "The parent resource name, 
for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/jobTriggers", + "request": { + "$ref": "GooglePrivacyDlpV2CreateJobTriggerRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2JobTrigger" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "delete": { + "description": "Deletes a job trigger.", + "flatPath": "v2/projects/{projectsId}/jobTriggers/{jobTriggersId}", + "httpMethod": "DELETE", + "id": "dlp.projects.jobTriggers.delete", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the project and the triggeredJob, for example\n`projects/dlp-test-project/jobTriggers/53234423`.", + "location": "path", + "pattern": "^projects/[^/]+/jobTriggers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GoogleProtobufEmpty" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "get": { + "description": "Gets a job trigger.", + "flatPath": "v2/projects/{projectsId}/jobTriggers/{jobTriggersId}", + "httpMethod": "GET", + "id": "dlp.projects.jobTriggers.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the project and the triggeredJob, for example\n`projects/dlp-test-project/jobTriggers/53234423`.", + "location": "path", + "pattern": "^projects/[^/]+/jobTriggers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "response": { + "$ref": "GooglePrivacyDlpV2JobTrigger" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists job triggers.", + "flatPath": "v2/projects/{projectsId}/jobTriggers", + "httpMethod": "GET", + "id": "dlp.projects.jobTriggers.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "orderBy": { + "description": "Optional comma 
separated list of triggeredJob fields to order by,\nfollowed by 'asc/desc' postfix, i.e.\n`\"create_time asc,name desc,schedule_mode asc\"`. This list is\ncase-insensitive.\n\nExample: `\"name asc,schedule_mode desc, status desc\"`\n\nSupported filters keys and values are:\n\n- `create_time`: corresponds to time the triggeredJob was created.\n- `update_time`: corresponds to time the triggeredJob was last updated.\n- `name`: corresponds to JobTrigger's display name.\n- `status`: corresponds to the triggeredJob status.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Optional size of the page, can be limited by a server.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional page token to continue retrieval. Comes from previous call\nto ListJobTriggers. `order_by` and `filter` should not change for\nsubsequent calls, but can be omitted if token is specified.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name, for example projects/my-project-id.", + "location": "path", + "pattern": "^projects/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+parent}/jobTriggers", + "response": { + "$ref": "GooglePrivacyDlpV2ListJobTriggersResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "patch": { + "description": "Updates a job trigger.", + "flatPath": "v2/projects/{projectsId}/jobTriggers/{jobTriggersId}", + "httpMethod": "PATCH", + "id": "dlp.projects.jobTriggers.patch", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Resource name of the project and the triggeredJob, for example\n`projects/dlp-test-project/jobTriggers/53234423`.", + "location": "path", + "pattern": "^projects/[^/]+/jobTriggers/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v2/{+name}", + "request": { + "$ref": 
"GooglePrivacyDlpV2UpdateJobTriggerRequest" + }, + "response": { + "$ref": "GooglePrivacyDlpV2JobTrigger" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + } + } + } + }, + "revision": "20180320", + "rootUrl": "https://dlp.googleapis.com/", + "schemas": { + "GooglePrivacyDlpV2Action": { + "description": "A task to execute on the completion of a job.", + "id": "GooglePrivacyDlpV2Action", + "properties": { + "pubSub": { + "$ref": "GooglePrivacyDlpV2PublishToPubSub", + "description": "Publish a notification to a pubsub topic." + }, + "saveFindings": { + "$ref": "GooglePrivacyDlpV2SaveFindings", + "description": "Save resulting findings in a provided location." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails": { + "description": "Result of a risk analysis operation request.", + "id": "GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails", + "properties": { + "categoricalStatsResult": { + "$ref": "GooglePrivacyDlpV2CategoricalStatsResult" + }, + "kAnonymityResult": { + "$ref": "GooglePrivacyDlpV2KAnonymityResult" + }, + "kMapEstimationResult": { + "$ref": "GooglePrivacyDlpV2KMapEstimationResult" + }, + "lDiversityResult": { + "$ref": "GooglePrivacyDlpV2LDiversityResult" + }, + "numericalStatsResult": { + "$ref": "GooglePrivacyDlpV2NumericalStatsResult" + }, + "requestedPrivacyMetric": { + "$ref": "GooglePrivacyDlpV2PrivacyMetric", + "description": "Privacy metric to compute." + }, + "requestedSourceTable": { + "$ref": "GooglePrivacyDlpV2BigQueryTable", + "description": "Input dataset to compute metrics over." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2AuxiliaryTable": { + "description": "An auxiliary table contains statistical information on the relative\nfrequency of different quasi-identifiers values. 
It has one or several\nquasi-identifiers columns, and one column that indicates the relative\nfrequency of each quasi-identifier tuple.\nIf a tuple is present in the data but not in the auxiliary table, the\ncorresponding relative frequency is assumed to be zero (and thus, the\ntuple is highly reidentifiable).", + "id": "GooglePrivacyDlpV2AuxiliaryTable", + "properties": { + "quasiIds": { + "description": "Quasi-identifier columns. [required]", + "items": { + "$ref": "GooglePrivacyDlpV2QuasiIdField" + }, + "type": "array" + }, + "relativeFrequency": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "The relative frequency column must contain a floating-point number\nbetween 0 and 1 (inclusive). Null values are assumed to be zero.\n[required]" + }, + "table": { + "$ref": "GooglePrivacyDlpV2BigQueryTable", + "description": "Auxiliary table location. [required]" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2BigQueryKey": { + "description": "Row key for identifying a record in BigQuery table.", + "id": "GooglePrivacyDlpV2BigQueryKey", + "properties": { + "rowNumber": { + "description": "Absolute number of the row from the beginning of the table at the time\nof scanning.", + "format": "int64", + "type": "string" + }, + "tableReference": { + "$ref": "GooglePrivacyDlpV2BigQueryTable", + "description": "Complete BigQuery table reference." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2BigQueryOptions": { + "description": "Options defining BigQuery table and row identifiers.", + "id": "GooglePrivacyDlpV2BigQueryOptions", + "properties": { + "identifyingFields": { + "description": "References to fields uniquely identifying rows within the table.\nNested fields in the format, like `person.birthdate.year`, are allowed.", + "items": { + "$ref": "GooglePrivacyDlpV2FieldId" + }, + "type": "array" + }, + "tableReference": { + "$ref": "GooglePrivacyDlpV2BigQueryTable", + "description": "Complete BigQuery table reference." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2BigQueryTable": { + "description": "Message defining the location of a BigQuery table. A table is uniquely\nidentified by its project_id, dataset_id, and table_name. Within a query\na table is often referenced with a string in the format of:\n`\u003cproject_id\u003e:\u003cdataset_id\u003e.\u003ctable_id\u003e` or\n`\u003cproject_id\u003e.\u003cdataset_id\u003e.\u003ctable_id\u003e`.", + "id": "GooglePrivacyDlpV2BigQueryTable", + "properties": { + "datasetId": { + "description": "Dataset ID of the table.", + "type": "string" + }, + "projectId": { + "description": "The Google Cloud Platform project ID of the project containing the table.\nIf omitted, project ID is inferred from the API call.", + "type": "string" + }, + "tableId": { + "description": "Name of the table.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2BoundingBox": { + "description": "Bounding box encompassing detected text within an image.", + "id": "GooglePrivacyDlpV2BoundingBox", + "properties": { + "height": { + "description": "Height of the bounding box in pixels.", + "format": "int32", + "type": "integer" + }, + "left": { + "description": "Left coordinate of the bounding box. (0,0) is upper left.", + "format": "int32", + "type": "integer" + }, + "top": { + "description": "Top coordinate of the bounding box. (0,0) is upper left.", + "format": "int32", + "type": "integer" + }, + "width": { + "description": "Width of the bounding box in pixels.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Bucket": { + "description": "Bucket is represented as a range, along with replacement values.", + "id": "GooglePrivacyDlpV2Bucket", + "properties": { + "max": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Upper bound of the range, exclusive; type must match min." + }, + "min": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Lower bound of the range, inclusive. 
Type should be the same as max if\nused." + }, + "replacementValue": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Replacement value for this bucket. If not provided\nthe default behavior will be to hyphenate the min-max range." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2BucketingConfig": { + "description": "Generalization function that buckets values based on ranges. The ranges and\nreplacement values are dynamically provided by the user for custom behavior,\nsuch as 1-30 -\u003e LOW 31-65 -\u003e MEDIUM 66-100 -\u003e HIGH\nThis can be used on\ndata of type: number, long, string, timestamp.\nIf the bound `Value` type differs from the type of data being transformed, we\nwill first attempt converting the type of the data to be transformed to match\nthe type of the bound before comparing.", + "id": "GooglePrivacyDlpV2BucketingConfig", + "properties": { + "buckets": { + "description": "Set of buckets. Ranges must be non-overlapping.", + "items": { + "$ref": "GooglePrivacyDlpV2Bucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ByteContentItem": { + "description": "Container for bytes to inspect or redact.", + "id": "GooglePrivacyDlpV2ByteContentItem", + "properties": { + "data": { + "description": "Content data to inspect or redact.", + "format": "byte", + "type": "string" + }, + "type": { + "description": "The type of data stored in the bytes string. 
Default will be TEXT_UTF8.", + "enum": [ + "BYTES_TYPE_UNSPECIFIED", + "IMAGE_JPEG", + "IMAGE_BMP", + "IMAGE_PNG", + "IMAGE_SVG", + "TEXT_UTF8" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CancelDlpJobRequest": { + "description": "The request message for canceling a DLP job.", + "id": "GooglePrivacyDlpV2CancelDlpJobRequest", + "properties": {}, + "type": "object" + }, + "GooglePrivacyDlpV2CategoricalStatsConfig": { + "description": "Compute numerical stats over an individual column, including\nnumber of distinct values and value count distribution.", + "id": "GooglePrivacyDlpV2CategoricalStatsConfig", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Field to compute categorical stats on. All column types are\nsupported except for arrays and structs. However, it may be more\ninformative to use NumericalStats when the field type is supported,\ndepending on the data." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CategoricalStatsHistogramBucket": { + "id": "GooglePrivacyDlpV2CategoricalStatsHistogramBucket", + "properties": { + "bucketSize": { + "description": "Total number of values in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValueCount": { + "description": "Total number of distinct values in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of value frequencies in this bucket. 
The total number of\nvalues returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2ValueFrequency" + }, + "type": "array" + }, + "valueFrequencyLowerBound": { + "description": "Lower bound on the value frequency of the values in this bucket.", + "format": "int64", + "type": "string" + }, + "valueFrequencyUpperBound": { + "description": "Upper bound on the value frequency of the values in this bucket.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CategoricalStatsResult": { + "description": "Result of the categorical stats computation.", + "id": "GooglePrivacyDlpV2CategoricalStatsResult", + "properties": { + "valueFrequencyHistogramBuckets": { + "description": "Histogram of value frequencies in the column.", + "items": { + "$ref": "GooglePrivacyDlpV2CategoricalStatsHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CharacterMaskConfig": { + "description": "Partially mask a string by replacing a given number of characters with a\nfixed character. Masking can start from the beginning or end of the string.\nThis can be used on data of any type (numbers, longs, and so on) and when\nde-identifying structured data we'll attempt to preserve the original data's\ntype. (This allows you to take a long like 123 and modify it to a string like\n**3.", + "id": "GooglePrivacyDlpV2CharacterMaskConfig", + "properties": { + "charactersToIgnore": { + "description": "When masking a string, items in this list will be skipped when replacing.\nFor example, if your string is 555-555-5555 and you ask us to skip `-` and\nmask 5 chars with * we would produce ***-*55-5555.", + "items": { + "$ref": "GooglePrivacyDlpV2CharsToIgnore" + }, + "type": "array" + }, + "maskingCharacter": { + "description": "Character to mask the sensitive values\u0026mdash;for example, \"*\" for an\nalphabetic string such as name, or \"0\" for a numeric string such as ZIP\ncode or credit card number. 
String must have length 1. If not supplied, we\nwill default to \"*\" for strings, 0 for digits.", + "type": "string" + }, + "numberToMask": { + "description": "Number of characters to mask. If not set, all matching chars will be\nmasked. Skipped characters do not count towards this tally.", + "format": "int32", + "type": "integer" + }, + "reverseOrder": { + "description": "Mask characters in reverse order. For example, if `masking_character` is\n'0', number_to_mask is 14, and `reverse_order` is false, then\n1234-5678-9012-3456 -\u003e 00000000000000-3456\nIf `masking_character` is '*', `number_to_mask` is 3, and `reverse_order`\nis true, then 12345 -\u003e 12***", + "type": "boolean" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CharsToIgnore": { + "description": "Characters to skip when doing deidentification of a value. These will be left\nalone and skipped.", + "id": "GooglePrivacyDlpV2CharsToIgnore", + "properties": { + "charactersToSkip": { + "type": "string" + }, + "commonCharactersToIgnore": { + "enum": [ + "COMMON_CHARS_TO_IGNORE_UNSPECIFIED", + "NUMERIC", + "ALPHA_UPPER_CASE", + "ALPHA_LOWER_CASE", + "PUNCTUATION", + "WHITESPACE" + ], + "enumDescriptions": [ + "", + "0-9", + "A-Z", + "a-z", + "US Punctuation, one of !\"#$%\u0026'()*+,-./:;\u003c=\u003e?@[\\]^_`{|}~", + "Whitespace character, one of [ \\t\\n\\x0B\\f\\r]" + ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CloudStorageOptions": { + "description": "Options defining a file or a set of files (path ending with *) within\na Google Cloud Storage bucket.", + "id": "GooglePrivacyDlpV2CloudStorageOptions", + "properties": { + "bytesLimitPerFile": { + "description": "Max number of bytes to scan from a file. 
If a scanned file's size is bigger\nthan this value then the rest of the bytes are omitted.", + "format": "int64", + "type": "string" + }, + "fileSet": { + "$ref": "GooglePrivacyDlpV2FileSet" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Color": { + "description": "Represents a color in the RGB color space.", + "id": "GooglePrivacyDlpV2Color", + "properties": { + "blue": { + "description": "The amount of blue in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" + }, + "green": { + "description": "The amount of green in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" + }, + "red": { + "description": "The amount of red in the color as a value in the interval [0, 1].", + "format": "float", + "type": "number" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Condition": { + "description": "The field type of `value` and `field` do not need to match to be\nconsidered equal, but not all comparisons are possible.\n\nA `value` of type:\n\n- `string` can be compared against all other types\n- `boolean` can only be compared against other booleans\n- `integer` can be compared against doubles or a string if the string value\ncan be parsed as an integer.\n- `double` can be compared against integers or a string if the string can\nbe parsed as a double.\n- `Timestamp` can be compared against strings in RFC 3339 date string\nformat.\n- `TimeOfDay` can be compared against timestamps and strings in the format\nof 'HH:mm:ss'.\n\nIf we fail to compare do to type mismatch, a warning will be given and\nthe condition will evaluate to false.", + "id": "GooglePrivacyDlpV2Condition", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Field within the record this condition is evaluated against. [required]" + }, + "operator": { + "description": "Operator used to compare the field or infoType to the value. 
[required]", + "enum": [ + "RELATIONAL_OPERATOR_UNSPECIFIED", + "EQUAL_TO", + "NOT_EQUAL_TO", + "GREATER_THAN", + "LESS_THAN", + "GREATER_THAN_OR_EQUALS", + "LESS_THAN_OR_EQUALS", + "EXISTS" + ], + "enumDescriptions": [ + "", + "Equal.", + "Not equal to.", + "Greater than.", + "Less than.", + "Greater than or equals.", + "Less than or equals.", + "Exists" + ], + "type": "string" + }, + "value": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Value to compare against. [Required, except for `EXISTS` tests.]" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Conditions": { + "description": "A collection of conditions.", + "id": "GooglePrivacyDlpV2Conditions", + "properties": { + "conditions": { + "items": { + "$ref": "GooglePrivacyDlpV2Condition" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ContentItem": { + "description": "Container structure for the content to inspect.", + "id": "GooglePrivacyDlpV2ContentItem", + "properties": { + "byteItem": { + "$ref": "GooglePrivacyDlpV2ByteContentItem", + "description": "Content data to inspect or redact. Replaces `type` and `data`." + }, + "table": { + "$ref": "GooglePrivacyDlpV2Table", + "description": "Structured content for inspection." + }, + "value": { + "description": "String data to inspect or redact.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ContentLocation": { + "description": "Findings container location data.", + "id": "GooglePrivacyDlpV2ContentLocation", + "properties": { + "containerName": { + "description": "Name of the container where the finding is located.\nThe top level name is the source file name or table name. 
Nested names\ncould be absent if the embedded object has no string identifier\n(for an example an image contained within a document).", + "type": "string" + }, + "containerTimestamp": { + "description": "Findings container modification timestamp, if applicable.\nFor Google Cloud Storage contains last file modification timestamp.\nFor BigQuery table contains last_modified_time property.\nFor Datastore - not populated.", + "format": "google-datetime", + "type": "string" + }, + "containerVersion": { + "description": "Findings container version, if available\n(\"generation\" for Google Cloud Storage).", + "type": "string" + }, + "documentLocation": { + "$ref": "GooglePrivacyDlpV2DocumentLocation", + "description": "Location data for document files." + }, + "imageLocation": { + "$ref": "GooglePrivacyDlpV2ImageLocation", + "description": "Location within an image's pixels." + }, + "recordLocation": { + "$ref": "GooglePrivacyDlpV2RecordLocation", + "description": "Location within a row or record of a database table." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CreateDeidentifyTemplateRequest": { + "description": "Request message for CreateDeidentifyTemplate.", + "id": "GooglePrivacyDlpV2CreateDeidentifyTemplateRequest", + "properties": { + "deidentifyTemplate": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate", + "description": "The DeidentifyTemplate to create." + }, + "templateId": { + "description": "The template id can contain uppercase and lowercase letters,\nnumbers, and hyphens; that is, it must match the regular\nexpression: `[a-zA-Z\\\\d-]+`. The maximum length is 100\ncharacters. Can be empty to allow the system to generate one.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CreateDlpJobRequest": { + "description": "Request message for CreateDlpJobRequest. 
Used to initiate long running\njobs such as calculating risk metrics or inspecting Google Cloud\nStorage.", + "id": "GooglePrivacyDlpV2CreateDlpJobRequest", + "properties": { + "inspectJob": { + "$ref": "GooglePrivacyDlpV2InspectJobConfig" + }, + "jobId": { + "description": "The job id can contain uppercase and lowercase letters,\nnumbers, and hyphens; that is, it must match the regular\nexpression: `[a-zA-Z\\\\d-]+`. The maximum length is 100\ncharacters. Can be empty to allow the system to generate one.", + "type": "string" + }, + "riskJob": { + "$ref": "GooglePrivacyDlpV2RiskAnalysisJobConfig" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CreateInspectTemplateRequest": { + "description": "Request message for CreateInspectTemplate.", + "id": "GooglePrivacyDlpV2CreateInspectTemplateRequest", + "properties": { + "inspectTemplate": { + "$ref": "GooglePrivacyDlpV2InspectTemplate", + "description": "The InspectTemplate to create." + }, + "templateId": { + "description": "The template id can contain uppercase and lowercase letters,\nnumbers, and hyphens; that is, it must match the regular\nexpression: `[a-zA-Z\\\\d-]+`. The maximum length is 100\ncharacters. Can be empty to allow the system to generate one.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CreateJobTriggerRequest": { + "description": "Request message for CreateJobTrigger.", + "id": "GooglePrivacyDlpV2CreateJobTriggerRequest", + "properties": { + "jobTrigger": { + "$ref": "GooglePrivacyDlpV2JobTrigger", + "description": "The JobTrigger to create." + }, + "triggerId": { + "description": "The trigger id can contain uppercase and lowercase letters,\nnumbers, and hyphens; that is, it must match the regular\nexpression: `[a-zA-Z\\\\d-]+`. The maximum length is 100\ncharacters. 
Can be empty to allow the system to generate one.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CryptoHashConfig": { + "description": "Pseudonymization method that generates surrogates via cryptographic hashing.\nUses SHA-256.\nThe key size must be either 32 or 64 bytes.\nOutputs a 32 byte digest as an uppercase hex string\n(for example, 41D1567F7F99F1DC2A5FAB886DEE5BEE).\nCurrently, only string and integer values can be hashed.", + "id": "GooglePrivacyDlpV2CryptoHashConfig", + "properties": { + "cryptoKey": { + "$ref": "GooglePrivacyDlpV2CryptoKey", + "description": "The key used by the hash function." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CryptoKey": { + "description": "This is a data encryption key (DEK) (as opposed to\na key encryption key (KEK) stored by KMS).\nWhen using KMS to wrap/unwrap DEKs, be sure to set an appropriate\nIAM policy on the KMS CryptoKey (KEK) to ensure an attacker cannot\nunwrap the data crypto key.", + "id": "GooglePrivacyDlpV2CryptoKey", + "properties": { + "kmsWrapped": { + "$ref": "GooglePrivacyDlpV2KmsWrappedCryptoKey" + }, + "transient": { + "$ref": "GooglePrivacyDlpV2TransientCryptoKey" + }, + "unwrapped": { + "$ref": "GooglePrivacyDlpV2UnwrappedCryptoKey" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig": { + "description": "Replaces an identifier with a surrogate using FPE with the FFX\nmode of operation; however when used in the `ReidentifyContent` API method,\nit serves the opposite function by reversing the surrogate back into\nthe original identifier.\nThe identifier must be encoded as ASCII.\nFor a given crypto key and context, the same identifier will be\nreplaced with the same surrogate.\nIdentifiers must be at least two characters long.\nIn the case that the identifier is the empty string, it will be skipped.\nSee [Pseudonymization](/dlp/docs/pseudonymization) for example usage.", + "id": "GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig", + 
"properties": { + "commonAlphabet": { + "enum": [ + "FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", + "NUMERIC", + "HEXADECIMAL", + "UPPER_CASE_ALPHA_NUMERIC", + "ALPHA_NUMERIC" + ], + "enumDescriptions": [ + "", + "[0-9] (radix of 10)", + "[0-9A-F] (radix of 16)", + "[0-9A-Z] (radix of 36)", + "[0-9A-Za-z] (radix of 62)" + ], + "type": "string" + }, + "context": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "The 'tweak', a context may be used for higher security since the same\nidentifier in two different contexts won't be given the same surrogate. If\nthe context is not set, a default tweak will be used.\n\nIf the context is set but:\n\n1. there is no record present when transforming a given value or\n1. the field is not present when transforming a given value,\n\na default tweak will be used.\n\nNote that case (1) is expected when an `InfoTypeTransformation` is\napplied to both structured and non-structured `ContentItem`s.\nCurrently, the referenced field may be of value type integer or string.\n\nThe tweak is constructed as a sequence of bytes in big endian byte order\nsuch that:\n\n- a 64 bit integer is encoded followed by a single byte of value 1\n- a string is encoded in UTF-8 format followed by a single byte of value\n2" + }, + "cryptoKey": { + "$ref": "GooglePrivacyDlpV2CryptoKey", + "description": "The key used by the encryption algorithm. [required]" + }, + "customAlphabet": { + "description": "This is supported by mapping these to the alphanumeric characters\nthat the FFX mode natively supports. This happens before/after\nencryption/decryption.\nEach character listed must appear only once.\nNumber of characters must be in the range [2, 62].\nThis must be encoded as ASCII.\nThe order of characters does not matter.", + "type": "string" + }, + "radix": { + "description": "The native way to select the alphabet. 
Must be in the range [2, 62].", + "format": "int32", + "type": "integer" + }, + "surrogateInfoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "The custom infoType to annotate the surrogate with.\nThis annotation will be applied to the surrogate by prefixing it with\nthe name of the custom infoType followed by the number of\ncharacters comprising the surrogate. The following scheme defines the\nformat: info_type_name(surrogate_character_count):surrogate\n\nFor example, if the name of custom infoType is 'MY_TOKEN_INFO_TYPE' and\nthe surrogate is 'abc', the full replacement value\nwill be: 'MY_TOKEN_INFO_TYPE(3):abc'\n\nThis annotation identifies the surrogate when inspecting content using the\ncustom infoType\n[`SurrogateType`](/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype).\nThis facilitates reversal of the surrogate when it occurs in free text.\n\nIn order for inspection to work properly, the name of this infoType must\nnot occur naturally anywhere in your data; otherwise, inspection may\nfind a surrogate that does not correspond to an actual identifier.\nTherefore, choose your custom infoType name carefully after considering\nwhat your data looks like. One way to select a name that has a high chance\nof yielding reliable detection is to include one or more unicode characters\nthat are highly improbable to exist in your data.\nFor example, assuming your data is entered from a regular ASCII keyboard,\nthe symbol with the hex code point 29DD might be used like so:\n⧝MY_TOKEN_TYPE" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2CustomInfoType": { + "description": "Custom information type provided by the user. Used to find domain-specific\nsensitive information configurable to the data in question.", + "id": "GooglePrivacyDlpV2CustomInfoType", + "properties": { + "detectionRules": { + "description": "Set of detection rules to apply to all findings of this custom info type.\nRules are applied in order that they are specified. 
Not supported for the\n`surrogate_type` custom info type.", + "items": { + "$ref": "GooglePrivacyDlpV2DetectionRule" + }, + "type": "array" + }, + "dictionary": { + "$ref": "GooglePrivacyDlpV2Dictionary", + "description": "Dictionary-based custom info type." + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "Info type configuration. All custom info types must have configurations\nthat do not conflict with built-in info types or other custom info types." + }, + "likelihood": { + "description": "Likelihood to return for this custom info type. This base value can be\naltered by a detection rule if the finding meets the criteria specified by\nthe rule. Defaults to `VERY_LIKELY` if not specified.", + "enum": [ + "LIKELIHOOD_UNSPECIFIED", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "enumDescriptions": [ + "Default value; information with all likelihoods is included.", + "Few matching elements.", + "", + "Some matching elements.", + "", + "Many matching elements." + ], + "type": "string" + }, + "regex": { + "$ref": "GooglePrivacyDlpV2Regex", + "description": "Regex-based custom info type." + }, + "surrogateType": { + "$ref": "GooglePrivacyDlpV2SurrogateType", + "description": "Surrogate info type." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DatastoreKey": { + "description": "Record key for a finding in Cloud Datastore.", + "id": "GooglePrivacyDlpV2DatastoreKey", + "properties": { + "entityKey": { + "$ref": "GooglePrivacyDlpV2Key", + "description": "Datastore entity key." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DatastoreOptions": { + "description": "Options defining a data set within Google Cloud Datastore.", + "id": "GooglePrivacyDlpV2DatastoreOptions", + "properties": { + "kind": { + "$ref": "GooglePrivacyDlpV2KindExpression", + "description": "The kind to process." 
+ }, + "partitionId": { + "$ref": "GooglePrivacyDlpV2PartitionId", + "description": "A partition ID identifies a grouping of entities. The grouping is always\nby project and namespace, however the namespace ID may be empty." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DateShiftConfig": { + "description": "Shifts dates by random number of days, with option to be consistent for the\nsame context.", + "id": "GooglePrivacyDlpV2DateShiftConfig", + "properties": { + "context": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Points to the field that contains the context, for example, an entity id.\nIf set, must also set method. If set, shift will be consistent for the\ngiven context." + }, + "cryptoKey": { + "$ref": "GooglePrivacyDlpV2CryptoKey", + "description": "Causes the shift to be computed based on this key and the context. This\nresults in the same shift for the same context and crypto_key." + }, + "lowerBoundDays": { + "description": "For example, -5 means shift date to at most 5 days back in the past.\n[Required]", + "format": "int32", + "type": "integer" + }, + "upperBoundDays": { + "description": "Range of shift in days. Actual shift will be selected at random within this\nrange (inclusive ends). Negative means shift to earlier in time. Must not\nbe more than 365250 days (1000 years) each direction.\n\nFor example, 3 means shift date to at most 3 days into the future.\n[Required]", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DateTime": { + "description": "Message for a date time object.", + "id": "GooglePrivacyDlpV2DateTime", + "properties": { + "date": { + "$ref": "GoogleTypeDate", + "description": "One or more of the following must be set. All fields are optional, but\nwhen set must be valid date or time values." 
+ }, + "dayOfWeek": { + "enum": [ + "DAY_OF_WEEK_UNSPECIFIED", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ], + "enumDescriptions": [ + "The unspecified day-of-week.", + "The day-of-week of Monday.", + "The day-of-week of Tuesday.", + "The day-of-week of Wednesday.", + "The day-of-week of Thursday.", + "The day-of-week of Friday.", + "The day-of-week of Saturday.", + "The day-of-week of Sunday." + ], + "type": "string" + }, + "time": { + "$ref": "GoogleTypeTimeOfDay" + }, + "timeZone": { + "$ref": "GooglePrivacyDlpV2TimeZone" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DeidentifyConfig": { + "description": "The configuration that controls how the data will change.", + "id": "GooglePrivacyDlpV2DeidentifyConfig", + "properties": { + "infoTypeTransformations": { + "$ref": "GooglePrivacyDlpV2InfoTypeTransformations", + "description": "Treat the dataset as free-form text and apply the same free text\ntransformation everywhere." + }, + "recordTransformations": { + "$ref": "GooglePrivacyDlpV2RecordTransformations", + "description": "Treat the dataset as structured. Transformations can be applied to\nspecific locations within structured datasets, such as transforming\na column within a table." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DeidentifyContentRequest": { + "description": "Request to de-identify a list of items.", + "id": "GooglePrivacyDlpV2DeidentifyContentRequest", + "properties": { + "deidentifyConfig": { + "$ref": "GooglePrivacyDlpV2DeidentifyConfig", + "description": "Configuration for the de-identification of the content item.\nItems specified here will override the template referenced by the\ndeidentify_template_name argument." + }, + "deidentifyTemplateName": { + "description": "Optional template to use. Any configuration directly specified in\ndeidentify_config will override those set in the template. 
Singular fields\nthat are set in this request will replace their corresponding fields in the\ntemplate. Repeated fields are appended. Singular sub-messages and groups\nare recursively merged.", + "type": "string" + }, + "inspectConfig": { + "$ref": "GooglePrivacyDlpV2InspectConfig", + "description": "Configuration for the inspector.\nItems specified here will override the template referenced by the\ninspect_template_name argument." + }, + "inspectTemplateName": { + "description": "Optional template to use. Any configuration directly specified in\ninspect_config will override those set in the template. Singular fields\nthat are set in this request will replace their corresponding fields in the\ntemplate. Repeated fields are appended. Singular sub-messages and groups\nare recursively merged.", + "type": "string" + }, + "item": { + "$ref": "GooglePrivacyDlpV2ContentItem", + "description": "The item to de-identify. Will be treated as text." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DeidentifyContentResponse": { + "description": "Results of de-identifying a ContentItem.", + "id": "GooglePrivacyDlpV2DeidentifyContentResponse", + "properties": { + "item": { + "$ref": "GooglePrivacyDlpV2ContentItem", + "description": "The de-identified item." + }, + "overview": { + "$ref": "GooglePrivacyDlpV2TransformationOverview", + "description": "An overview of the changes that were made on the `item`." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DeidentifyTemplate": { + "description": "The DeidentifyTemplates contains instructions on how to deidentify content.", + "id": "GooglePrivacyDlpV2DeidentifyTemplate", + "properties": { + "createTime": { + "description": "The creation timestamp of a inspectTemplate, output only field.", + "format": "google-datetime", + "type": "string" + }, + "deidentifyConfig": { + "$ref": "GooglePrivacyDlpV2DeidentifyConfig", + "description": "///////////// // The core content of the template // ///////////////" + }, + "description": { + "description": "Short description (max 256 chars).", + "type": "string" + }, + "displayName": { + "description": "Display name (max 256 chars).", + "type": "string" + }, + "name": { + "description": "The template name. Output only.\n\nThe template will have one of the following formats:\n`projects/PROJECT_ID/deidentifyTemplates/TEMPLATE_ID` OR\n`organizations/ORGANIZATION_ID/deidentifyTemplates/TEMPLATE_ID`", + "type": "string" + }, + "updateTime": { + "description": "The last update timestamp of a inspectTemplate, output only field.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DetectionRule": { + "description": "Rule for modifying a custom info type to alter behavior under certain\ncircumstances, depending on the specific details of the rule. Not supported\nfor the `surrogate_type` custom info type.", + "id": "GooglePrivacyDlpV2DetectionRule", + "properties": { + "hotwordRule": { + "$ref": "GooglePrivacyDlpV2HotwordRule", + "description": "Hotword-based detection rule." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Dictionary": { + "description": "Custom information type based on a dictionary of words or phrases. 
This can\nbe used to match sensitive information specific to the data, such as a list\nof employee IDs or job titles.\n\nDictionary words are case-insensitive and all characters other than letters\nand digits in the unicode [Basic Multilingual\nPlane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane)\nwill be replaced with whitespace when scanning for matches, so the\ndictionary phrase \"Sam Johnson\" will match all three phrases \"sam johnson\",\n\"Sam, Johnson\", and \"Sam (Johnson)\". Additionally, the characters\nsurrounding any match must be of a different type than the adjacent\ncharacters within the word, so letters must be next to non-letters and\ndigits next to non-digits. For example, the dictionary word \"jen\" will\nmatch the first three letters of the text \"jen123\" but will return no\nmatches for \"jennifer\".\n\nDictionary words containing a large number of characters that are not\nletters or digits may result in unexpected findings because such characters\nare treated as whitespace.", + "id": "GooglePrivacyDlpV2Dictionary", + "properties": { + "wordList": { + "$ref": "GooglePrivacyDlpV2WordList", + "description": "List of words or phrases to search for." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DlpJob": { + "description": "Combines all of the information about a DLP job.", + "id": "GooglePrivacyDlpV2DlpJob", + "properties": { + "createTime": { + "description": "Time when the job was created.", + "format": "google-datetime", + "type": "string" + }, + "endTime": { + "description": "Time when the job finished.", + "format": "google-datetime", + "type": "string" + }, + "errors": { + "description": "A stream of errors encountered running the job.", + "items": { + "$ref": "GooglePrivacyDlpV2Error" + }, + "type": "array" + }, + "inspectDetails": { + "$ref": "GooglePrivacyDlpV2InspectDataSourceDetails", + "description": "Results from inspecting a data source." 
+ }, + "jobTriggerName": { + "description": "If created by a job trigger, the resource name of the trigger that\ninstantiated the job.", + "type": "string" + }, + "name": { + "description": "The server-assigned name.", + "type": "string" + }, + "riskDetails": { + "$ref": "GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails", + "description": "Results from analyzing risk of a data source." + }, + "startTime": { + "description": "Time when the job started.", + "format": "google-datetime", + "type": "string" + }, + "state": { + "description": "State of a job.", + "enum": [ + "JOB_STATE_UNSPECIFIED", + "PENDING", + "RUNNING", + "DONE", + "CANCELED", + "FAILED" + ], + "enumDescriptions": [ + "", + "The job has not yet started.", + "The job is currently running.", + "The job is no longer running.", + "The job was canceled before it could complete.", + "The job had an error and did not complete." + ], + "type": "string" + }, + "type": { + "description": "The type of job.", + "enum": [ + "DLP_JOB_TYPE_UNSPECIFIED", + "INSPECT_JOB", + "RISK_ANALYSIS_JOB" + ], + "enumDescriptions": [ + "", + "The job inspected Google Cloud for sensitive data.", + "The job executed a Risk Analysis computation." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2DocumentLocation": { + "description": "Location of a finding within a document.", + "id": "GooglePrivacyDlpV2DocumentLocation", + "properties": { + "fileOffset": { + "description": "Offset of the line, from the beginning of the file, where the finding\nis located.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Error": { + "description": "Details information about an error encountered during job execution or\nthe results of an unsuccessful activation of the JobTrigger.\nOutput only field.", + "id": "GooglePrivacyDlpV2Error", + "properties": { + "details": { + "$ref": "GoogleRpcStatus" + }, + "timestamps": { + "description": "The times the error occurred.", + "items": { + "format": "google-datetime", + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Expressions": { + "description": "An expression, consisting of an operator and conditions.", + "id": "GooglePrivacyDlpV2Expressions", + "properties": { + "conditions": { + "$ref": "GooglePrivacyDlpV2Conditions" + }, + "logicalOperator": { + "description": "The operator to apply to the result of conditions. 
Default and currently\nonly supported value is `AND`.", + "enum": [ + "LOGICAL_OPERATOR_UNSPECIFIED", + "AND" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2FieldId": { + "description": "General identifier of a data field in a storage service.", + "id": "GooglePrivacyDlpV2FieldId", + "properties": { + "name": { + "description": "Name describing the field.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2FieldTransformation": { + "description": "The transformation to apply to the field.", + "id": "GooglePrivacyDlpV2FieldTransformation", + "properties": { + "condition": { + "$ref": "GooglePrivacyDlpV2RecordCondition", + "description": "Only apply the transformation if the condition evaluates to true for the\ngiven `RecordCondition`. The conditions are allowed to reference fields\nthat are not used in the actual transformation. [optional]\n\nExample Use Cases:\n\n- Apply a different bucket transformation to an age column if the zip code\ncolumn for the same record is within a specific range.\n- Redact a field if the date of birth field is greater than 85." + }, + "fields": { + "description": "Input field(s) to apply the transformation to. [required]", + "items": { + "$ref": "GooglePrivacyDlpV2FieldId" + }, + "type": "array" + }, + "infoTypeTransformations": { + "$ref": "GooglePrivacyDlpV2InfoTypeTransformations", + "description": "Treat the contents of the field as free text, and selectively\ntransform content that matches an `InfoType`." + }, + "primitiveTransformation": { + "$ref": "GooglePrivacyDlpV2PrimitiveTransformation", + "description": "Apply the transformation to the entire field." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2FileSet": { + "description": "Set of files to scan.", + "id": "GooglePrivacyDlpV2FileSet", + "properties": { + "url": { + "description": "The url, in the format `gs://\u003cbucket\u003e/\u003cpath\u003e`. 
Trailing wildcard in the\npath is allowed.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Finding": { + "description": "Represents a piece of potentially sensitive content.", + "id": "GooglePrivacyDlpV2Finding", + "properties": { + "createTime": { + "description": "Timestamp when finding was detected.", + "format": "google-datetime", + "type": "string" + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "The type of content that might have been found.\nProvided if requested by the `InspectConfig`." + }, + "likelihood": { + "description": "Estimate of how likely it is that the `info_type` is correct.", + "enum": [ + "LIKELIHOOD_UNSPECIFIED", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "enumDescriptions": [ + "Default value; information with all likelihoods is included.", + "Few matching elements.", + "", + "Some matching elements.", + "", + "Many matching elements." + ], + "type": "string" + }, + "location": { + "$ref": "GooglePrivacyDlpV2Location", + "description": "Where the content was found." + }, + "quote": { + "description": "The content that was found. Even if the content is not textual, it\nmay be converted to a textual representation here.\nProvided if requested by the `InspectConfig` and the finding is\nless than or equal to 4096 bytes long. If the finding exceeds 4096 bytes\nin length, the quote may be omitted.", + "type": "string" + }, + "quoteInfo": { + "$ref": "GooglePrivacyDlpV2QuoteInfo", + "description": "Contains data parsed from quotes. Only populated if include_quote was set\nto true and a supported infoType was requested. Currently supported\ninfoTypes: DATE, DATE_OF_BIRTH and TIME." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2FindingLimits": { + "id": "GooglePrivacyDlpV2FindingLimits", + "properties": { + "maxFindingsPerInfoType": { + "description": "Configuration of findings limit given for specified infoTypes.", + "items": { + "$ref": "GooglePrivacyDlpV2InfoTypeLimit" + }, + "type": "array" + }, + "maxFindingsPerItem": { + "description": "Max number of findings that will be returned for each item scanned.\nWhen set within `InspectDataSourceRequest`,\nthe maximum returned is 1000 regardless if this is set higher.\nWhen set within `InspectContentRequest`, this field is ignored.", + "format": "int32", + "type": "integer" + }, + "maxFindingsPerRequest": { + "description": "Max number of findings that will be returned per request/job.\nWhen set within `InspectContentRequest`, the maximum returned is 1000\nregardless if this is set higher.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2FixedSizeBucketingConfig": { + "description": "Buckets values based on fixed size ranges. The\nBucketing transformation can provide all of this functionality,\nbut requires more configuration. This message is provided as a convenience to\nthe user for simple bucketing strategies.\n\nThe transformed value will be a hyphenated string of\n\u003clower_bound\u003e-\u003cupper_bound\u003e, i.e if lower_bound = 10 and upper_bound = 20\nall values that are within this bucket will be replaced with \"10-20\".\n\nThis can be used on data of type: double, long.\n\nIf the bound Value type differs from the type of data\nbeing transformed, we will first attempt converting the type of the data to\nbe transformed to match the type of the bound before comparing.", + "id": "GooglePrivacyDlpV2FixedSizeBucketingConfig", + "properties": { + "bucketSize": { + "description": "Size of each bucket (except for minimum and maximum buckets). 
So if\n`lower_bound` = 10, `upper_bound` = 89, and `bucket_size` = 10, then the\nfollowing buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60,\n60-70, 70-80, 80-89, 89+. Precision up to 2 decimals works. [Required].", + "format": "double", + "type": "number" + }, + "lowerBound": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Lower bound value of buckets. All values less than `lower_bound` are\ngrouped together into a single bucket; for example if `lower_bound` = 10,\nthen all values less than 10 are replaced with the value “-10”. [Required]." + }, + "upperBound": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Upper bound value of buckets. All values greater than upper_bound are\ngrouped together into a single bucket; for example if `upper_bound` = 89,\nthen all values greater than 89 are replaced with the value “89+”.\n[Required]." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2HotwordRule": { + "description": "Detection rule that adjusts the likelihood of findings within a certain\nproximity of hotwords.", + "id": "GooglePrivacyDlpV2HotwordRule", + "properties": { + "hotwordRegex": { + "$ref": "GooglePrivacyDlpV2Regex", + "description": "Regex pattern defining what qualifies as a hotword." + }, + "likelihoodAdjustment": { + "$ref": "GooglePrivacyDlpV2LikelihoodAdjustment", + "description": "Likelihood adjustment to apply to all matching findings." + }, + "proximity": { + "$ref": "GooglePrivacyDlpV2Proximity", + "description": "Proximity of the finding within which the entire hotword must reside.\nThe total length of the window cannot exceed 1000 characters. Note that\nthe finding itself will be included in the window, so that hotwords may\nbe used to match substrings of the finding itself. 
For example, the\ncertainty of a phone number regex \"\\(\\d{3}\\) \\d{3}-\\d{4}\" could be\nadjusted upwards if the area code is known to be the local area code of\na company office using the hotword regex \"\\(xxx\\)\", where \"xxx\"\nis the area code in question." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ImageLocation": { + "description": "Location of the finding within an image.", + "id": "GooglePrivacyDlpV2ImageLocation", + "properties": { + "boundingBoxes": { + "description": "Bounding boxes locating the pixels within the image containing the finding.", + "items": { + "$ref": "GooglePrivacyDlpV2BoundingBox" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ImageRedactionConfig": { + "description": "Configuration for determining how redaction of images should occur.", + "id": "GooglePrivacyDlpV2ImageRedactionConfig", + "properties": { + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "Only one per info_type should be provided per request. If not\nspecified, and redact_all_text is false, the DLP API will redact all\ntext that it matches against all info_types that are found, but not\nspecified in another ImageRedactionConfig." + }, + "redactAllText": { + "description": "If true, all text found in the image, regardless whether it matches an\ninfo_type, is redacted.", + "type": "boolean" + }, + "redactionColor": { + "$ref": "GooglePrivacyDlpV2Color", + "description": "The color to use when redacting content from an image. If not specified,\nthe default is black." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InfoType": { + "description": "Type of information detected by the API.", + "id": "GooglePrivacyDlpV2InfoType", + "properties": { + "name": { + "description": "Name of the information type.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InfoTypeDescription": { + "description": "InfoType description.", + "id": "GooglePrivacyDlpV2InfoTypeDescription", + "properties": { + "displayName": { + "description": "Human readable form of the infoType name.", + "type": "string" + }, + "name": { + "description": "Internal name of the infoType.", + "type": "string" + }, + "supportedBy": { + "description": "Which parts of the API supports this InfoType.", + "enumDescriptions": [ + "", + "Supported by the inspect operations.", + "Supported by the risk analysis operations." + ], + "items": { + "enum": [ + "ENUM_TYPE_UNSPECIFIED", + "INSPECT", + "RISK_ANALYSIS" + ], + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InfoTypeLimit": { + "description": "Max findings configuration per infoType, per content item or long\nrunning DlpJob.", + "id": "GooglePrivacyDlpV2InfoTypeLimit", + "properties": { + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "Type of information the findings limit applies to. Only one limit per\ninfo_type should be provided. If InfoTypeLimit does not have an\ninfo_type, the DLP API applies the limit against all info_types that\nare found but not specified in another InfoTypeLimit." 
+ }, + "maxFindings": { + "description": "Max findings limit for the given infoType.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InfoTypeStats": { + "description": "Statistics regarding a specific InfoType.", + "id": "GooglePrivacyDlpV2InfoTypeStats", + "properties": { + "count": { + "description": "Number of findings for this infoType.", + "format": "int64", + "type": "string" + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "The type of finding this stat is for." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InfoTypeTransformation": { + "description": "A transformation to apply to text that is identified as a specific\ninfo_type.", + "id": "GooglePrivacyDlpV2InfoTypeTransformation", + "properties": { + "infoTypes": { + "description": "InfoTypes to apply the transformation to. Empty list will match all\navailable infoTypes for this transformation.", + "items": { + "$ref": "GooglePrivacyDlpV2InfoType" + }, + "type": "array" + }, + "primitiveTransformation": { + "$ref": "GooglePrivacyDlpV2PrimitiveTransformation", + "description": "Primitive transformation to apply to the infoType. [required]" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InfoTypeTransformations": { + "description": "A type of transformation that will scan unstructured text and\napply various `PrimitiveTransformation`s to each finding, where the\ntransformation is applied to only values that were identified as a specific\ninfo_type.", + "id": "GooglePrivacyDlpV2InfoTypeTransformations", + "properties": { + "transformations": { + "description": "Transformation for each infoType. Cannot specify more than one\nfor a given infoType. 
[required]", + "items": { + "$ref": "GooglePrivacyDlpV2InfoTypeTransformation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectConfig": { + "description": "Configuration description of the scanning process.\nWhen used with redactContent only info_types and min_likelihood are currently\nused.", + "id": "GooglePrivacyDlpV2InspectConfig", + "properties": { + "contentOptions": { + "description": "List of options defining data content to scan.\nIf empty, text, images, and other content will be included.", + "enumDescriptions": [ + "Includes entire content of a file or a data stream.", + "Text content within the data, excluding any metadata.", + "Images found in the data." + ], + "items": { + "enum": [ + "CONTENT_UNSPECIFIED", + "CONTENT_TEXT", + "CONTENT_IMAGE" + ], + "type": "string" + }, + "type": "array" + }, + "customInfoTypes": { + "description": "Custom infoTypes provided by the user.", + "items": { + "$ref": "GooglePrivacyDlpV2CustomInfoType" + }, + "type": "array" + }, + "excludeInfoTypes": { + "description": "When true, excludes type information of the findings.", + "type": "boolean" + }, + "includeQuote": { + "description": "When true, a contextual quote from the data that triggered a finding is\nincluded in the response; see Finding.quote.", + "type": "boolean" + }, + "infoTypes": { + "description": "Restricts what info_types to look for. The values must correspond to\nInfoType values returned by ListInfoTypes or found in documentation.\nEmpty info_types runs all enabled detectors.", + "items": { + "$ref": "GooglePrivacyDlpV2InfoType" + }, + "type": "array" + }, + "limits": { + "$ref": "GooglePrivacyDlpV2FindingLimits" + }, + "minLikelihood": { + "description": "Only returns findings equal or above this threshold. 
The default is\nPOSSIBLE.", + "enum": [ + "LIKELIHOOD_UNSPECIFIED", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "enumDescriptions": [ + "Default value; information with all likelihoods is included.", + "Few matching elements.", + "", + "Some matching elements.", + "", + "Many matching elements." + ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectContentRequest": { + "description": "Request to search for potentially sensitive info in a ContentItem.", + "id": "GooglePrivacyDlpV2InspectContentRequest", + "properties": { + "inspectConfig": { + "$ref": "GooglePrivacyDlpV2InspectConfig", + "description": "Configuration for the inspector. What specified here will override\nthe template referenced by the inspect_template_name argument." + }, + "inspectTemplateName": { + "description": "Optional template to use. Any configuration directly specified in\ninspect_config will override those set in the template. Singular fields\nthat are set in this request will replace their corresponding fields in the\ntemplate. Repeated fields are appended. Singular sub-messages and groups\nare recursively merged.", + "type": "string" + }, + "item": { + "$ref": "GooglePrivacyDlpV2ContentItem", + "description": "The item to inspect." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectContentResponse": { + "description": "Results of inspecting an item.", + "id": "GooglePrivacyDlpV2InspectContentResponse", + "properties": { + "result": { + "$ref": "GooglePrivacyDlpV2InspectResult", + "description": "The findings." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectDataSourceDetails": { + "description": "The results of an inspect DataSource job.", + "id": "GooglePrivacyDlpV2InspectDataSourceDetails", + "properties": { + "requestedOptions": { + "$ref": "GooglePrivacyDlpV2RequestedOptions", + "description": "The configuration used for this job." 
+ }, + "result": { + "$ref": "GooglePrivacyDlpV2Result", + "description": "A summary of the outcome of this inspect job." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectJobConfig": { + "id": "GooglePrivacyDlpV2InspectJobConfig", + "properties": { + "actions": { + "description": "Actions to execute at the completion of the job. Are executed in the order\nprovided.", + "items": { + "$ref": "GooglePrivacyDlpV2Action" + }, + "type": "array" + }, + "inspectConfig": { + "$ref": "GooglePrivacyDlpV2InspectConfig", + "description": "How and what to scan for." + }, + "inspectTemplateName": { + "description": "If provided, will be used as the default for all values in InspectConfig.\n`inspect_config` will be merged into the values persisted as part of the\ntemplate.", + "type": "string" + }, + "storageConfig": { + "$ref": "GooglePrivacyDlpV2StorageConfig", + "description": "The data to scan." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectResult": { + "description": "All the findings for a single scanned item.", + "id": "GooglePrivacyDlpV2InspectResult", + "properties": { + "findings": { + "description": "List of findings for an item.", + "items": { + "$ref": "GooglePrivacyDlpV2Finding" + }, + "type": "array" + }, + "findingsTruncated": { + "description": "If true, then this item might have more findings than were returned,\nand the findings returned are an arbitrary subset of all findings.\nThe findings list might be truncated because the input items were too\nlarge, or because the server reached the maximum amount of resources\nallowed for a single API call. 
For best results, divide the input into\nsmaller batches.", + "type": "boolean" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2InspectTemplate": { + "description": "The inspectTemplate contains a configuration (set of types of sensitive data\nto be detected) to be used anywhere you otherwise would normally specify\nInspectConfig.", + "id": "GooglePrivacyDlpV2InspectTemplate", + "properties": { + "createTime": { + "description": "The creation timestamp of a inspectTemplate, output only field.", + "format": "google-datetime", + "type": "string" + }, + "description": { + "description": "Short description (max 256 chars).", + "type": "string" + }, + "displayName": { + "description": "Display name (max 256 chars).", + "type": "string" + }, + "inspectConfig": { + "$ref": "GooglePrivacyDlpV2InspectConfig", + "description": "The core content of the template. Configuration of the scanning process." + }, + "name": { + "description": "The template name. Output only.\n\nThe template will have one of the following formats:\n`projects/PROJECT_ID/inspectTemplates/TEMPLATE_ID` OR\n`organizations/ORGANIZATION_ID/inspectTemplates/TEMPLATE_ID`", + "type": "string" + }, + "updateTime": { + "description": "The last update timestamp of a inspectTemplate, output only field.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2JobTrigger": { + "description": "Contains a configuration to make dlp api calls on a repeating basis.", + "id": "GooglePrivacyDlpV2JobTrigger", + "properties": { + "createTime": { + "description": "The creation timestamp of a triggeredJob, output only field.", + "format": "google-datetime", + "type": "string" + }, + "description": { + "description": "User provided description (max 256 chars)", + "type": "string" + }, + "displayName": { + "description": "Display name (max 100 chars)", + "type": "string" + }, + "errors": { + "description": "A stream of errors encountered when the trigger was activated. 
Repeated\nerrors may result in the JobTrigger automaticaly being paused.\nWill return the last 100 errors. Whenever the JobTrigger is modified\nthis list will be cleared. Output only field.", + "items": { + "$ref": "GooglePrivacyDlpV2Error" + }, + "type": "array" + }, + "inspectJob": { + "$ref": "GooglePrivacyDlpV2InspectJobConfig" + }, + "lastRunTime": { + "description": "The timestamp of the last time this trigger executed, output only field.", + "format": "google-datetime", + "type": "string" + }, + "name": { + "description": "Unique resource name for the triggeredJob, assigned by the service when the\ntriggeredJob is created, for example\n`projects/dlp-test-project/triggeredJobs/53234423`.", + "type": "string" + }, + "status": { + "description": "A status for this trigger. [required]", + "enum": [ + "STATUS_UNSPECIFIED", + "HEALTHY", + "PAUSED", + "CANCELLED" + ], + "enumDescriptions": [ + "", + "Trigger is healthy.", + "Trigger is temporarily paused.", + "Trigger is cancelled and can not be resumed." + ], + "type": "string" + }, + "triggers": { + "description": "A list of triggers which will be OR'ed together. Only one in the list\nneeds to trigger for a job to be started. The list may contain only\na single Schedule trigger and must have at least one object.", + "items": { + "$ref": "GooglePrivacyDlpV2Trigger" + }, + "type": "array" + }, + "updateTime": { + "description": "The last update timestamp of a triggeredJob, output only field.", + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KAnonymityConfig": { + "description": "k-anonymity metric, used for analysis of reidentification risk.", + "id": "GooglePrivacyDlpV2KAnonymityConfig", + "properties": { + "quasiIds": { + "description": "Set of fields to compute k-anonymity over. When multiple fields are\nspecified, they are considered a single composite key. 
Structs and\nrepeated data types are not supported; however, nested fields are\nsupported so long as they are not structs themselves or nested within\na repeated field.", + "items": { + "$ref": "GooglePrivacyDlpV2FieldId" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KAnonymityEquivalenceClass": { + "description": "The set of columns' values that share the same ldiversity value", + "id": "GooglePrivacyDlpV2KAnonymityEquivalenceClass", + "properties": { + "equivalenceClassSize": { + "description": "Size of the equivalence class, for example number of rows with the\nabove set of values.", + "format": "int64", + "type": "string" + }, + "quasiIdsValues": { + "description": "Set of values defining the equivalence class. One value per\nquasi-identifier column in the original KAnonymity metric message.\nThe order is always the same as the original request.", + "items": { + "$ref": "GooglePrivacyDlpV2Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KAnonymityHistogramBucket": { + "id": "GooglePrivacyDlpV2KAnonymityHistogramBucket", + "properties": { + "bucketSize": { + "description": "Total number of equivalence classes in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValueCount": { + "description": "Total number of distinct equivalence classes in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of equivalence classes in this bucket. 
The total number of\nclasses returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2KAnonymityEquivalenceClass" + }, + "type": "array" + }, + "equivalenceClassSizeLowerBound": { + "description": "Lower bound on the size of the equivalence classes in this bucket.", + "format": "int64", + "type": "string" + }, + "equivalenceClassSizeUpperBound": { + "description": "Upper bound on the size of the equivalence classes in this bucket.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KAnonymityResult": { + "description": "Result of the k-anonymity computation.", + "id": "GooglePrivacyDlpV2KAnonymityResult", + "properties": { + "equivalenceClassHistogramBuckets": { + "description": "Histogram of k-anonymity equivalence classes.", + "items": { + "$ref": "GooglePrivacyDlpV2KAnonymityHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KMapEstimationConfig": { + "description": "Reidentifiability metric. This corresponds to a risk model similar to what\nis called \"journalist risk\" in the literature, except the attack dataset is\nstatistically modeled instead of being perfectly known. This can be done\nusing publicly available data (like the US Census), or using a custom\nstatistical model (indicated as one or several BigQuery tables), or by\nextrapolating from the distribution of values in the input dataset.", + "id": "GooglePrivacyDlpV2KMapEstimationConfig", + "properties": { + "auxiliaryTables": { + "description": "Several auxiliary tables can be used in the analysis. Each custom_tag\nused to tag a quasi-identifiers column must appear in exactly one column\nof one auxiliary table.", + "items": { + "$ref": "GooglePrivacyDlpV2AuxiliaryTable" + }, + "type": "array" + }, + "quasiIds": { + "description": "Fields considered to be quasi-identifiers. No two columns can have the\nsame tag. 
[required]", + "items": { + "$ref": "GooglePrivacyDlpV2TaggedField" + }, + "type": "array" + }, + "regionCode": { + "description": "ISO 3166-1 alpha-2 region code to use in the statistical modeling.\nRequired if no column is tagged with a region-specific InfoType (like\nUS_ZIP_5) or a region code.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KMapEstimationHistogramBucket": { + "description": "A KMapEstimationHistogramBucket message with the following values:\n min_anonymity: 3\n max_anonymity: 5\n frequency: 42\nmeans that there are 42 records whose quasi-identifier values correspond\nto 3, 4 or 5 people in the overlying population. An important particular\ncase is when min_anonymity = max_anonymity = 1: the frequency field then\ncorresponds to the number of uniquely identifiable records.", + "id": "GooglePrivacyDlpV2KMapEstimationHistogramBucket", + "properties": { + "bucketSize": { + "description": "Number of records within these anonymity bounds.", + "format": "int64", + "type": "string" + }, + "bucketValueCount": { + "description": "Total number of distinct quasi-identifier tuple values in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of quasi-identifier tuple values in this bucket. 
The total\nnumber of classes returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2KMapEstimationQuasiIdValues" + }, + "type": "array" + }, + "maxAnonymity": { + "description": "Always greater than or equal to min_anonymity.", + "format": "int64", + "type": "string" + }, + "minAnonymity": { + "description": "Always positive.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KMapEstimationQuasiIdValues": { + "description": "A tuple of values for the quasi-identifier columns.", + "id": "GooglePrivacyDlpV2KMapEstimationQuasiIdValues", + "properties": { + "estimatedAnonymity": { + "description": "The estimated anonymity for these quasi-identifier values.", + "format": "int64", + "type": "string" + }, + "quasiIdsValues": { + "description": "The quasi-identifier values.", + "items": { + "$ref": "GooglePrivacyDlpV2Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KMapEstimationResult": { + "description": "Result of the reidentifiability analysis. Note that these results are an\nestimation, not exact values.", + "id": "GooglePrivacyDlpV2KMapEstimationResult", + "properties": { + "kMapEstimationHistogram": { + "description": "The intervals [min_anonymity, max_anonymity] do not overlap. If a value\ndoesn't correspond to any such interval, the associated frequency is\nzero. 
For example, the following records:\n {min_anonymity: 1, max_anonymity: 1, frequency: 17}\n {min_anonymity: 2, max_anonymity: 3, frequency: 42}\n {min_anonymity: 5, max_anonymity: 10, frequency: 99}\nmean that there are no record with an estimated anonymity of 4, 5, or\nlarger than 10.", + "items": { + "$ref": "GooglePrivacyDlpV2KMapEstimationHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Key": { + "description": "A unique identifier for a Datastore entity.\nIf a key's partition ID or any of its path kinds or names are\nreserved/read-only, the key is reserved/read-only.\nA reserved/read-only key is forbidden in certain documented contexts.", + "id": "GooglePrivacyDlpV2Key", + "properties": { + "partitionId": { + "$ref": "GooglePrivacyDlpV2PartitionId", + "description": "Entities are partitioned into subsets, currently identified by a project\nID and namespace ID.\nQueries are scoped to a single partition." + }, + "path": { + "description": "The entity path.\nAn entity path consists of one or more elements composed of a kind and a\nstring or numerical identifier, which identify entities. The first\nelement identifies a _root entity_, the second element identifies\na _child_ of the root entity, the third element identifies a child of the\nsecond entity, and so forth. 
The entities identified by all prefixes of\nthe path are called the element's _ancestors_.\n\nA path can never be empty, and a path can have at most 100 elements.", + "items": { + "$ref": "GooglePrivacyDlpV2PathElement" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KindExpression": { + "description": "A representation of a Datastore kind.", + "id": "GooglePrivacyDlpV2KindExpression", + "properties": { + "name": { + "description": "The name of the kind.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2KmsWrappedCryptoKey": { + "description": "Include to use an existing data crypto key wrapped by KMS.\nAuthorization requires the following IAM permissions when sending a request\nto perform a crypto transformation using a kms-wrapped crypto key:\ndlp.kms.encrypt", + "id": "GooglePrivacyDlpV2KmsWrappedCryptoKey", + "properties": { + "cryptoKeyName": { + "description": "The resource name of the KMS CryptoKey to use for unwrapping. [required]", + "type": "string" + }, + "wrappedKey": { + "description": "The wrapped data crypto key. [required]", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2LDiversityConfig": { + "description": "l-diversity metric, used for analysis of reidentification risk.", + "id": "GooglePrivacyDlpV2LDiversityConfig", + "properties": { + "quasiIds": { + "description": "Set of quasi-identifiers indicating how equivalence classes are\ndefined for the l-diversity computation. When multiple fields are\nspecified, they are considered a single composite key.", + "items": { + "$ref": "GooglePrivacyDlpV2FieldId" + }, + "type": "array" + }, + "sensitiveAttribute": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Sensitive field for computing the l-value." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2LDiversityEquivalenceClass": { + "description": "The set of columns' values that share the same ldiversity value.", + "id": "GooglePrivacyDlpV2LDiversityEquivalenceClass", + "properties": { + "equivalenceClassSize": { + "description": "Size of the k-anonymity equivalence class.", + "format": "int64", + "type": "string" + }, + "numDistinctSensitiveValues": { + "description": "Number of distinct sensitive values in this equivalence class.", + "format": "int64", + "type": "string" + }, + "quasiIdsValues": { + "description": "Quasi-identifier values defining the k-anonymity equivalence\nclass. The order is always the same as the original request.", + "items": { + "$ref": "GooglePrivacyDlpV2Value" + }, + "type": "array" + }, + "topSensitiveValues": { + "description": "Estimated frequencies of top sensitive values.", + "items": { + "$ref": "GooglePrivacyDlpV2ValueFrequency" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2LDiversityHistogramBucket": { + "id": "GooglePrivacyDlpV2LDiversityHistogramBucket", + "properties": { + "bucketSize": { + "description": "Total number of equivalence classes in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValueCount": { + "description": "Total number of distinct equivalence classes in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of equivalence classes in this bucket. 
The total number of\nclasses returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2LDiversityEquivalenceClass" + }, + "type": "array" + }, + "sensitiveValueFrequencyLowerBound": { + "description": "Lower bound on the sensitive value frequencies of the equivalence\nclasses in this bucket.", + "format": "int64", + "type": "string" + }, + "sensitiveValueFrequencyUpperBound": { + "description": "Upper bound on the sensitive value frequencies of the equivalence\nclasses in this bucket.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2LDiversityResult": { + "description": "Result of the l-diversity computation.", + "id": "GooglePrivacyDlpV2LDiversityResult", + "properties": { + "sensitiveValueFrequencyHistogramBuckets": { + "description": "Histogram of l-diversity equivalence class sensitive value frequencies.", + "items": { + "$ref": "GooglePrivacyDlpV2LDiversityHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2LikelihoodAdjustment": { + "description": "Message for specifying an adjustment to the likelihood of a finding as\npart of a detection rule.", + "id": "GooglePrivacyDlpV2LikelihoodAdjustment", + "properties": { + "fixedLikelihood": { + "description": "Set the likelihood of a finding to a fixed value.", + "enum": [ + "LIKELIHOOD_UNSPECIFIED", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "enumDescriptions": [ + "Default value; information with all likelihoods is included.", + "Few matching elements.", + "", + "Some matching elements.", + "", + "Many matching elements." + ], + "type": "string" + }, + "relativeLikelihood": { + "description": "Increase or decrease the likelihood by the specified number of\nlevels. 
For example, if a finding would be `POSSIBLE` without the\ndetection rule and `relative_likelihood` is 1, then it is upgraded to\n`LIKELY`, while a value of -1 would downgrade it to `UNLIKELY`.\nLikelihood may never drop below `VERY_UNLIKELY` or exceed\n`VERY_LIKELY`, so applying an adjustment of 1 followed by an\nadjustment of -1 when base likelihood is `VERY_LIKELY` will result in\na final likelihood of `LIKELY`.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ListDeidentifyTemplatesResponse": { + "description": "Response message for ListDeidentifyTemplates.", + "id": "GooglePrivacyDlpV2ListDeidentifyTemplatesResponse", + "properties": { + "deidentifyTemplates": { + "description": "List of deidentify templates, up to page_size in\nListDeidentifyTemplatesRequest.", + "items": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If the next page is available then the next page token to be used\nin following ListDeidentifyTemplates request.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ListDlpJobsResponse": { + "description": "The response message for listing DLP jobs.", + "id": "GooglePrivacyDlpV2ListDlpJobsResponse", + "properties": { + "jobs": { + "description": "A list of DlpJobs that matches the specified filter in the request.", + "items": { + "$ref": "GooglePrivacyDlpV2DlpJob" + }, + "type": "array" + }, + "nextPageToken": { + "description": "The standard List next-page token.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ListInfoTypesResponse": { + "description": "Response to the ListInfoTypes request.", + "id": "GooglePrivacyDlpV2ListInfoTypesResponse", + "properties": { + "infoTypes": { + "description": "Set of sensitive infoTypes.", + "items": { + "$ref": "GooglePrivacyDlpV2InfoTypeDescription" + }, + "type": "array" + } + }, + "type": "object" + }, + 
"GooglePrivacyDlpV2ListInspectTemplatesResponse": { + "description": "Response message for ListInspectTemplates.", + "id": "GooglePrivacyDlpV2ListInspectTemplatesResponse", + "properties": { + "inspectTemplates": { + "description": "List of inspectTemplates, up to page_size in ListInspectTemplatesRequest.", + "items": { + "$ref": "GooglePrivacyDlpV2InspectTemplate" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If the next page is available then the next page token to be used\nin following ListInspectTemplates request.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ListJobTriggersResponse": { + "description": "Response message for ListJobTriggers.", + "id": "GooglePrivacyDlpV2ListJobTriggersResponse", + "properties": { + "jobTriggers": { + "description": "List of triggeredJobs, up to page_size in ListJobTriggersRequest.", + "items": { + "$ref": "GooglePrivacyDlpV2JobTrigger" + }, + "type": "array" + }, + "nextPageToken": { + "description": "If the next page is available then the next page token to be used\nin following ListJobTriggers request.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Location": { + "description": "Specifies the location of the finding.", + "id": "GooglePrivacyDlpV2Location", + "properties": { + "byteRange": { + "$ref": "GooglePrivacyDlpV2Range", + "description": "Zero-based byte offsets delimiting the finding.\nThese are relative to the finding's containing element.\nNote that when the content is not textual, this references\nthe UTF-8 encoded textual representation of the content.\nOmitted if content is an image." + }, + "codepointRange": { + "$ref": "GooglePrivacyDlpV2Range", + "description": "Unicode character offsets delimiting the finding.\nThese are relative to the finding's containing element.\nProvided when the content is text." 
+ }, + "contentLocations": { + "description": "List of nested objects pointing to the precise location of the finding\nwithin the file or record.", + "items": { + "$ref": "GooglePrivacyDlpV2ContentLocation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2NumericalStatsConfig": { + "description": "Compute numerical stats over an individual column, including\nmin, max, and quantiles.", + "id": "GooglePrivacyDlpV2NumericalStatsConfig", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Field to compute numerical stats on. Supported types are\ninteger, float, date, datetime, timestamp, time." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2NumericalStatsResult": { + "description": "Result of the numerical stats computation.", + "id": "GooglePrivacyDlpV2NumericalStatsResult", + "properties": { + "maxValue": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Maximum value appearing in the column." + }, + "minValue": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Minimum value appearing in the column." + }, + "quantileValues": { + "description": "List of 99 values that partition the set of field values into 100 equal\nsized buckets.", + "items": { + "$ref": "GooglePrivacyDlpV2Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2OutputStorageConfig": { + "description": "Cloud repository for storing output.", + "id": "GooglePrivacyDlpV2OutputStorageConfig", + "properties": { + "outputSchema": { + "description": "Schema used for writing the findings. Columns are derived from the\n`Finding` object. If appending to an existing table, any columns from the\npredefined schema that are missing will be added. 
No columns in the\nexisting table will be deleted.\n\nIf unspecified, then all available columns will be used for a new table,\nand no changes will be made to an existing table.", + "enum": [ + "OUTPUT_SCHEMA_UNSPECIFIED", + "BASIC_COLUMNS", + "GCS_COLUMNS", + "DATASTORE_COLUMNS", + "BIG_QUERY_COLUMNS", + "ALL_COLUMNS" + ], + "enumDescriptions": [ + "", + "Basic schema including only `info_type`, `quote`, `certainty`, and\n`timestamp`.", + "Schema tailored to findings from scanning Google Cloud Storage.", + "Schema tailored to findings from scanning Google Datastore.", + "Schema tailored to findings from scanning Google BigQuery.", + "Schema containing all columns." + ], + "type": "string" + }, + "table": { + "$ref": "GooglePrivacyDlpV2BigQueryTable", + "description": "Store findings in an existing table or a new table in an existing\ndataset. Each column in an existing table must have the same name, type,\nand mode of a field in the `Finding` object. If table_id is not set a new\none will be generated for you with the following format:\ndlp_googleapis_yyyy_mm_dd_[dlp_job_id]. Pacific timezone will be used for\ngenerating the date details." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2PartitionId": { + "description": "Datastore partition ID.\nA partition ID identifies a grouping of entities. 
The grouping is always\nby project and namespace, however the namespace ID may be empty.\n\nA partition ID contains several dimensions:\nproject ID and namespace ID.", + "id": "GooglePrivacyDlpV2PartitionId", + "properties": { + "namespaceId": { + "description": "If not empty, the ID of the namespace to which the entities belong.", + "type": "string" + }, + "projectId": { + "description": "The ID of the project to which the entities belong.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2PathElement": { + "description": "A (kind, ID/name) pair used to construct a key path.\n\nIf either name or ID is set, the element is complete.\nIf neither is set, the element is incomplete.", + "id": "GooglePrivacyDlpV2PathElement", + "properties": { + "id": { + "description": "The auto-allocated ID of the entity.\nNever equal to zero. Values less than zero are discouraged and may not\nbe supported in the future.", + "format": "int64", + "type": "string" + }, + "kind": { + "description": "The kind of the entity.\nA kind matching regex `__.*__` is reserved/read-only.\nA kind must not contain more than 1500 bytes when UTF-8 encoded.\nCannot be `\"\"`.", + "type": "string" + }, + "name": { + "description": "The name of the entity.\nA name matching regex `__.*__` is reserved/read-only.\nA name must not be more than 1500 bytes when UTF-8 encoded.\nCannot be `\"\"`.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2PrimitiveTransformation": { + "description": "A rule for transforming a value.", + "id": "GooglePrivacyDlpV2PrimitiveTransformation", + "properties": { + "bucketingConfig": { + "$ref": "GooglePrivacyDlpV2BucketingConfig" + }, + "characterMaskConfig": { + "$ref": "GooglePrivacyDlpV2CharacterMaskConfig" + }, + "cryptoHashConfig": { + "$ref": "GooglePrivacyDlpV2CryptoHashConfig" + }, + "cryptoReplaceFfxFpeConfig": { + "$ref": "GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig" + }, + "dateShiftConfig": { + "$ref": 
"GooglePrivacyDlpV2DateShiftConfig" + }, + "fixedSizeBucketingConfig": { + "$ref": "GooglePrivacyDlpV2FixedSizeBucketingConfig" + }, + "redactConfig": { + "$ref": "GooglePrivacyDlpV2RedactConfig" + }, + "replaceConfig": { + "$ref": "GooglePrivacyDlpV2ReplaceValueConfig" + }, + "replaceWithInfoTypeConfig": { + "$ref": "GooglePrivacyDlpV2ReplaceWithInfoTypeConfig" + }, + "timePartConfig": { + "$ref": "GooglePrivacyDlpV2TimePartConfig" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2PrivacyMetric": { + "description": "Privacy metric to compute for reidentification risk analysis.", + "id": "GooglePrivacyDlpV2PrivacyMetric", + "properties": { + "categoricalStatsConfig": { + "$ref": "GooglePrivacyDlpV2CategoricalStatsConfig" + }, + "kAnonymityConfig": { + "$ref": "GooglePrivacyDlpV2KAnonymityConfig" + }, + "kMapEstimationConfig": { + "$ref": "GooglePrivacyDlpV2KMapEstimationConfig" + }, + "lDiversityConfig": { + "$ref": "GooglePrivacyDlpV2LDiversityConfig" + }, + "numericalStatsConfig": { + "$ref": "GooglePrivacyDlpV2NumericalStatsConfig" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Proximity": { + "description": "Message for specifying a window around a finding to apply a detection\nrule.", + "id": "GooglePrivacyDlpV2Proximity", + "properties": { + "windowAfter": { + "description": "Number of characters after the finding to consider.", + "format": "int32", + "type": "integer" + }, + "windowBefore": { + "description": "Number of characters before the finding to consider.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2PublishToPubSub": { + "description": "Publish the results of a DlpJob to a pub sub channel.\nCompatible with: Inpect, Risk", + "id": "GooglePrivacyDlpV2PublishToPubSub", + "properties": { + "topic": { + "description": "Cloud Pub/Sub topic to send notifications to. 
The topic must have given\npublishing access rights to the DLP API service account executing\nthe long running DlpJob sending the notifications.\nFormat is projects/{project}/topics/{topic}.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2QuasiIdField": { + "description": "A quasi-identifier column has a custom_tag, used to know which column\nin the data corresponds to which column in the statistical model.", + "id": "GooglePrivacyDlpV2QuasiIdField", + "properties": { + "customTag": { + "type": "string" + }, + "field": { + "$ref": "GooglePrivacyDlpV2FieldId" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2QuoteInfo": { + "description": "Message for infoType-dependent details parsed from quote.", + "id": "GooglePrivacyDlpV2QuoteInfo", + "properties": { + "dateTime": { + "$ref": "GooglePrivacyDlpV2DateTime" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Range": { + "description": "Generic half-open interval [start, end)", + "id": "GooglePrivacyDlpV2Range", + "properties": { + "end": { + "description": "Index of the last character of the range (exclusive).", + "format": "int64", + "type": "string" + }, + "start": { + "description": "Index of the first character of the range (inclusive).", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RecordCondition": { + "description": "A condition for determining whether a transformation should be applied to\na field.", + "id": "GooglePrivacyDlpV2RecordCondition", + "properties": { + "expressions": { + "$ref": "GooglePrivacyDlpV2Expressions", + "description": "An expression." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RecordKey": { + "description": "Message for a unique key indicating a record that contains a finding.", + "id": "GooglePrivacyDlpV2RecordKey", + "properties": { + "bigQueryKey": { + "$ref": "GooglePrivacyDlpV2BigQueryKey" + }, + "datastoreKey": { + "$ref": "GooglePrivacyDlpV2DatastoreKey" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RecordLocation": { + "description": "Location of a finding within a row or record.", + "id": "GooglePrivacyDlpV2RecordLocation", + "properties": { + "fieldId": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Field id of the field containing the finding." + }, + "recordKey": { + "$ref": "GooglePrivacyDlpV2RecordKey", + "description": "Key of the finding." + }, + "tableLocation": { + "$ref": "GooglePrivacyDlpV2TableLocation", + "description": "Location within a `ContentItem.Table`." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RecordSuppression": { + "description": "Configuration to suppress records whose suppression conditions evaluate to\ntrue.", + "id": "GooglePrivacyDlpV2RecordSuppression", + "properties": { + "condition": { + "$ref": "GooglePrivacyDlpV2RecordCondition", + "description": "A condition that when it evaluates to true will result in the record being\nevaluated to be suppressed from the transformed content." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RecordTransformations": { + "description": "A type of transformation that is applied over structured data such as a\ntable.", + "id": "GooglePrivacyDlpV2RecordTransformations", + "properties": { + "fieldTransformations": { + "description": "Transform the record by applying various field transformations.", + "items": { + "$ref": "GooglePrivacyDlpV2FieldTransformation" + }, + "type": "array" + }, + "recordSuppressions": { + "description": "Configuration defining which records get suppressed entirely. 
Records that\nmatch any suppression rule are omitted from the output [optional].", + "items": { + "$ref": "GooglePrivacyDlpV2RecordSuppression" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RedactConfig": { + "description": "Redact a given value. For example, if used with an `InfoTypeTransformation`\ntransforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the\noutput would be 'My phone number is '.", + "id": "GooglePrivacyDlpV2RedactConfig", + "properties": {}, + "type": "object" + }, + "GooglePrivacyDlpV2RedactImageRequest": { + "description": "Request to search for potentially sensitive info in a list of items\nand replace it with a default or provided content.", + "id": "GooglePrivacyDlpV2RedactImageRequest", + "properties": { + "byteItem": { + "$ref": "GooglePrivacyDlpV2ByteContentItem", + "description": "The content must be PNG, JPEG, SVG or BMP." + }, + "imageRedactionConfigs": { + "description": "The configuration for specifying what content to redact from images.", + "items": { + "$ref": "GooglePrivacyDlpV2ImageRedactionConfig" + }, + "type": "array" + }, + "inspectConfig": { + "$ref": "GooglePrivacyDlpV2InspectConfig", + "description": "Configuration for the inspector." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RedactImageResponse": { + "description": "Results of redacting an image.", + "id": "GooglePrivacyDlpV2RedactImageResponse", + "properties": { + "extractedText": { + "description": "If an image was being inspected and the InspectConfig's include_quote was\nset to true, then this field will include all text, if any, that was found\nin the image.", + "type": "string" + }, + "redactedImage": { + "description": "The redacted image. 
The type will be the same as the original image.", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Regex": { + "description": "Message defining a custom regular expression.", + "id": "GooglePrivacyDlpV2Regex", + "properties": { + "pattern": { + "description": "Pattern defining the regular expression.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ReidentifyContentRequest": { + "description": "Request to re-identify an item.", + "id": "GooglePrivacyDlpV2ReidentifyContentRequest", + "properties": { + "inspectConfig": { + "$ref": "GooglePrivacyDlpV2InspectConfig", + "description": "Configuration for the inspector." + }, + "inspectTemplateName": { + "description": "Optional template to use. Any configuration directly specified in\n`inspect_config` will override those set in the template. Singular fields\nthat are set in this request will replace their corresponding fields in the\ntemplate. Repeated fields are appended. Singular sub-messages and groups\nare recursively merged.", + "type": "string" + }, + "item": { + "$ref": "GooglePrivacyDlpV2ContentItem", + "description": "The item to re-identify. Will be treated as text." + }, + "reidentifyConfig": { + "$ref": "GooglePrivacyDlpV2DeidentifyConfig", + "description": "Configuration for the re-identification of the content item.\nThis field shares the same proto message type that is used for\nde-identification, however its usage here is for the reversal of the\nprevious de-identification. Re-identification is performed by examining\nthe transformations used to de-identify the items and executing the\nreverse. This requires that only reversible transformations\nbe provided here. The reversible transformations are:\n\n - `CryptoReplaceFfxFpeConfig`" + }, + "reidentifyTemplateName": { + "description": "Optional template to use. 
References an instance of `DeidentifyTemplate`.\nAny configuration directly specified in `reidentify_config` or\n`inspect_config` will override those set in the template. Singular fields\nthat are set in this request will replace their corresponding fields in the\ntemplate. Repeated fields are appended. Singular sub-messages and groups\nare recursively merged.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ReidentifyContentResponse": { + "description": "Results of re-identifying a item.", + "id": "GooglePrivacyDlpV2ReidentifyContentResponse", + "properties": { + "item": { + "$ref": "GooglePrivacyDlpV2ContentItem", + "description": "The re-identified item." + }, + "overview": { + "$ref": "GooglePrivacyDlpV2TransformationOverview", + "description": "An overview of the changes that were made to the `item`." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ReplaceValueConfig": { + "description": "Replace each input value with a given `Value`.", + "id": "GooglePrivacyDlpV2ReplaceValueConfig", + "properties": { + "newValue": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "Value to replace it with." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ReplaceWithInfoTypeConfig": { + "description": "Replace each matching finding with the name of the info_type.", + "id": "GooglePrivacyDlpV2ReplaceWithInfoTypeConfig", + "properties": {}, + "type": "object" + }, + "GooglePrivacyDlpV2RequestedOptions": { + "id": "GooglePrivacyDlpV2RequestedOptions", + "properties": { + "jobConfig": { + "$ref": "GooglePrivacyDlpV2InspectJobConfig" + }, + "snapshotInspectTemplate": { + "$ref": "GooglePrivacyDlpV2InspectTemplate", + "description": "If run with an inspect template, a snapshot of it's state at the time of\nthis run." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Result": { + "id": "GooglePrivacyDlpV2Result", + "properties": { + "infoTypeStats": { + "description": "Statistics of how many instances of each info type were found during\ninspect job.", + "items": { + "$ref": "GooglePrivacyDlpV2InfoTypeStats" + }, + "type": "array" + }, + "processedBytes": { + "description": "Total size in bytes that were processed.", + "format": "int64", + "type": "string" + }, + "totalEstimatedBytes": { + "description": "Estimate of the number of bytes to process.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2RiskAnalysisJobConfig": { + "description": "Configuration for a risk analysis job.", + "id": "GooglePrivacyDlpV2RiskAnalysisJobConfig", + "properties": { + "actions": { + "description": "Actions to execute at the completion of the job. Are executed in the order\nprovided.", + "items": { + "$ref": "GooglePrivacyDlpV2Action" + }, + "type": "array" + }, + "privacyMetric": { + "$ref": "GooglePrivacyDlpV2PrivacyMetric", + "description": "Privacy metric to compute." + }, + "sourceTable": { + "$ref": "GooglePrivacyDlpV2BigQueryTable", + "description": "Input dataset to compute metrics over." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Row": { + "id": "GooglePrivacyDlpV2Row", + "properties": { + "values": { + "items": { + "$ref": "GooglePrivacyDlpV2Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2SaveFindings": { + "description": "If set, the detailed findings will be persisted to the specified\nOutputStorageConfig. 
Compatible with: Inspect", + "id": "GooglePrivacyDlpV2SaveFindings", + "properties": { + "outputConfig": { + "$ref": "GooglePrivacyDlpV2OutputStorageConfig" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Schedule": { + "description": "Schedule for triggeredJobs.", + "id": "GooglePrivacyDlpV2Schedule", + "properties": { + "recurrencePeriodDuration": { + "description": "With this option a job is started a regular periodic basis. For\nexample: every 10 minutes.\n\nA scheduled start time will be skipped if the previous\nexecution has not ended when its scheduled time occurs.\n\nThis value must be set to a time duration greater than or equal\nto 60 minutes and can be no longer than 60 days.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2StorageConfig": { + "description": "Shared message indicating Cloud storage type.", + "id": "GooglePrivacyDlpV2StorageConfig", + "properties": { + "bigQueryOptions": { + "$ref": "GooglePrivacyDlpV2BigQueryOptions", + "description": "BigQuery options specification." + }, + "cloudStorageOptions": { + "$ref": "GooglePrivacyDlpV2CloudStorageOptions", + "description": "Google Cloud Storage options specification." + }, + "datastoreOptions": { + "$ref": "GooglePrivacyDlpV2DatastoreOptions", + "description": "Google Cloud Datastore options specification." 
+ }, + "timespanConfig": { + "$ref": "GooglePrivacyDlpV2TimespanConfig" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2SummaryResult": { + "description": "A collection that informs the user the number of times a particular\n`TransformationResultCode` and error details occurred.", + "id": "GooglePrivacyDlpV2SummaryResult", + "properties": { + "code": { + "enum": [ + "TRANSFORMATION_RESULT_CODE_UNSPECIFIED", + "SUCCESS", + "ERROR" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "count": { + "format": "int64", + "type": "string" + }, + "details": { + "description": "A place for warnings or errors to show up if a transformation didn't\nwork as expected.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2SurrogateType": { + "description": "Message for detecting output from deidentification transformations\nsuch as\n[`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2/content/deidentify#CryptoReplaceFfxFpeConfig).\nThese types of transformations are\nthose that perform pseudonymization, thereby producing a \"surrogate\" as\noutput. This should be used in conjunction with a field on the\ntransformation such as `surrogate_info_type`. This custom info type does\nnot support the use of `detection_rules`.", + "id": "GooglePrivacyDlpV2SurrogateType", + "properties": {}, + "type": "object" + }, + "GooglePrivacyDlpV2Table": { + "description": "Structured content to inspect. 
Up to 50,000 `Value`s per request allowed.", + "id": "GooglePrivacyDlpV2Table", + "properties": { + "headers": { + "items": { + "$ref": "GooglePrivacyDlpV2FieldId" + }, + "type": "array" + }, + "rows": { + "items": { + "$ref": "GooglePrivacyDlpV2Row" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TableLocation": { + "description": "Location of a finding within a table.", + "id": "GooglePrivacyDlpV2TableLocation", + "properties": { + "rowIndex": { + "description": "The zero-based index of the row where the finding is located.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TaggedField": { + "description": "A column with a semantic tag attached.", + "id": "GooglePrivacyDlpV2TaggedField", + "properties": { + "customTag": { + "description": "A column can be tagged with a custom tag. In this case, the user must\nindicate an auxiliary table that contains statistical information on\nthe possible values of this column (below).", + "type": "string" + }, + "field": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Identifies the column. [required]" + }, + "inferred": { + "$ref": "GoogleProtobufEmpty", + "description": "If no semantic tag is indicated, we infer the statistical model from\nthe distribution of values in the input data" + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "A column can be tagged with a InfoType to use the relevant public\ndataset as a statistical model of population, if available. We\ncurrently support US ZIP codes, region codes, ages and genders.\nTo programmatically obtain the list of supported InfoTypes, use\nListInfoTypes with the supported_by=RISK_ANALYSIS filter." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TimePartConfig": { + "description": "For use with `Date`, `Timestamp`, and `TimeOfDay`, extract or preserve a\nportion of the value.", + "id": "GooglePrivacyDlpV2TimePartConfig", + "properties": { + "partToExtract": { + "enum": [ + "TIME_PART_UNSPECIFIED", + "YEAR", + "MONTH", + "DAY_OF_MONTH", + "DAY_OF_WEEK", + "WEEK_OF_YEAR", + "HOUR_OF_DAY" + ], + "enumDescriptions": [ + "", + "[0-9999]", + "[1-12]", + "[1-31]", + "[1-7]", + "[1-52]", + "[0-23]" + ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TimeZone": { + "id": "GooglePrivacyDlpV2TimeZone", + "properties": { + "offsetMinutes": { + "description": "Set only if the offset can be determined. Positive for time ahead of UTC.\nE.g. For \"UTC-9\", this value is -540.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TimespanConfig": { + "description": "Configuration of the timespan of the items to include in scanning.\nCurrently only supported when inspecting Google Cloud Storage and BigQuery.", + "id": "GooglePrivacyDlpV2TimespanConfig", + "properties": { + "enableAutoPopulationOfTimespanConfig": { + "description": "When the job is started by a JobTrigger we will automatically figure out\na valid start_time to avoid scanning files that have not been modified\nsince the last time the JobTrigger executed. 
This will be based on the\ntime of the execution of the last run of the JobTrigger.", + "type": "boolean" + }, + "endTime": { + "description": "Exclude files newer than this value.\nIf set to zero, no upper time limit is applied.", + "format": "google-datetime", + "type": "string" + }, + "startTime": { + "description": "Exclude files older than this value.", + "format": "google-datetime", + "type": "string" + }, + "timestampField": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Specification of the field containing the timestamp of scanned items.\nRequired for data sources like Datastore or BigQuery.\nThe valid data types of the timestamp field are:\nfor BigQuery - timestamp, date, datetime;\nfor Datastore - timestamp.\nDatastore entity will be scanned if the timestamp property does not exist\nor its value is empty or invalid." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TransformationOverview": { + "description": "Overview of the modifications that occurred.", + "id": "GooglePrivacyDlpV2TransformationOverview", + "properties": { + "transformationSummaries": { + "description": "Transformations applied to the dataset.", + "items": { + "$ref": "GooglePrivacyDlpV2TransformationSummary" + }, + "type": "array" + }, + "transformedBytes": { + "description": "Total size in bytes that were transformed in some way.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TransformationSummary": { + "description": "Summary of a single tranformation.\nOnly one of 'transformation', 'field_transformation', or 'record_suppress'\nwill be set.", + "id": "GooglePrivacyDlpV2TransformationSummary", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2FieldId", + "description": "Set if the transformation was limited to a specific FieldId." 
+ }, + "fieldTransformations": { + "description": "The field transformation that was applied.\nIf multiple field transformations are requested for a single field,\nthis list will contain all of them; otherwise, only one is supplied.", + "items": { + "$ref": "GooglePrivacyDlpV2FieldTransformation" + }, + "type": "array" + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2InfoType", + "description": "Set if the transformation was limited to a specific info_type." + }, + "recordSuppress": { + "$ref": "GooglePrivacyDlpV2RecordSuppression", + "description": "The specific suppression option these stats apply to." + }, + "results": { + "items": { + "$ref": "GooglePrivacyDlpV2SummaryResult" + }, + "type": "array" + }, + "transformation": { + "$ref": "GooglePrivacyDlpV2PrimitiveTransformation", + "description": "The specific transformation these stats apply to." + }, + "transformedBytes": { + "description": "Total size in bytes that were transformed in some way.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2TransientCryptoKey": { + "description": "Use this to have a random data crypto key generated.\nIt will be discarded after the request finishes.", + "id": "GooglePrivacyDlpV2TransientCryptoKey", + "properties": { + "name": { + "description": "Name of the key. 
[required]\nThis is an arbitrary string used to differentiate different keys.\nA unique key is generated per name: two separate `TransientCryptoKey`\nprotos share the same generated key if their names are the same.\nWhen the data crypto key is generated, this name is not used in any way\n(repeating the api call will result in a different key being generated).", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Trigger": { + "description": "What event needs to occur for a new job to be started.", + "id": "GooglePrivacyDlpV2Trigger", + "properties": { + "schedule": { + "$ref": "GooglePrivacyDlpV2Schedule", + "description": "Create a job on a repeating basis based on the elapse of time." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2UnwrappedCryptoKey": { + "description": "Using raw keys is prone to security risks due to accidentally\nleaking the key. Choose another type of key if possible.", + "id": "GooglePrivacyDlpV2UnwrappedCryptoKey", + "properties": { + "key": { + "description": "The AES 128/192/256 bit key. [required]", + "format": "byte", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest": { + "description": "Request message for UpdateDeidentifyTemplate.", + "id": "GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest", + "properties": { + "deidentifyTemplate": { + "$ref": "GooglePrivacyDlpV2DeidentifyTemplate", + "description": "New DeidentifyTemplate value." + }, + "updateMask": { + "description": "Mask to control which fields get updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2UpdateInspectTemplateRequest": { + "description": "Request message for UpdateInspectTemplate.", + "id": "GooglePrivacyDlpV2UpdateInspectTemplateRequest", + "properties": { + "inspectTemplate": { + "$ref": "GooglePrivacyDlpV2InspectTemplate", + "description": "New InspectTemplate value." 
+ }, + "updateMask": { + "description": "Mask to control which fields get updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2UpdateJobTriggerRequest": { + "description": "Request message for UpdateJobTrigger.", + "id": "GooglePrivacyDlpV2UpdateJobTriggerRequest", + "properties": { + "jobTrigger": { + "$ref": "GooglePrivacyDlpV2JobTrigger", + "description": "New JobTrigger value." + }, + "updateMask": { + "description": "Mask to control which fields get updated.", + "format": "google-fieldmask", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2Value": { + "description": "Set of primitive values supported by the system.\nNote that for the purposes of inspection or transformation, the number\nof bytes considered to comprise a 'Value' is based on its representation\nas a UTF-8 encoded string. For example, if 'integer_value' is set to\n123456789, the number of bytes would be counted as 9, even though an\nint64 only holds up to 8 bytes of data.", + "id": "GooglePrivacyDlpV2Value", + "properties": { + "booleanValue": { + "type": "boolean" + }, + "dateValue": { + "$ref": "GoogleTypeDate" + }, + "dayOfWeekValue": { + "enum": [ + "DAY_OF_WEEK_UNSPECIFIED", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ], + "enumDescriptions": [ + "The unspecified day-of-week.", + "The day-of-week of Monday.", + "The day-of-week of Tuesday.", + "The day-of-week of Wednesday.", + "The day-of-week of Thursday.", + "The day-of-week of Friday.", + "The day-of-week of Saturday.", + "The day-of-week of Sunday." 
+ ], + "type": "string" + }, + "floatValue": { + "format": "double", + "type": "number" + }, + "integerValue": { + "format": "int64", + "type": "string" + }, + "stringValue": { + "type": "string" + }, + "timeValue": { + "$ref": "GoogleTypeTimeOfDay" + }, + "timestampValue": { + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2ValueFrequency": { + "description": "A value of a field, including its frequency.", + "id": "GooglePrivacyDlpV2ValueFrequency", + "properties": { + "count": { + "description": "How many times the value is contained in the field.", + "format": "int64", + "type": "string" + }, + "value": { + "$ref": "GooglePrivacyDlpV2Value", + "description": "A value contained in the field in question." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2WordList": { + "description": "Message defining a list of words or phrases to search for in the data.", + "id": "GooglePrivacyDlpV2WordList", + "properties": { + "words": { + "description": "Words or phrases defining the dictionary. The dictionary must contain\nat least one phrase and every phrase must contain at least 2 characters\nthat are letters or digits. [required]", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1AuxiliaryTable": { + "description": "An auxiliary table contains statistical information on the relative\nfrequency of different quasi-identifiers values. It has one or several\nquasi-identifiers columns, and one column that indicates the relative\nfrequency of each quasi-identifier tuple.\nIf a tuple is present in the data but not in the auxiliary table, the\ncorresponding relative frequency is assumed to be zero (and thus, the\ntuple is highly reidentifiable).", + "id": "GooglePrivacyDlpV2beta1AuxiliaryTable", + "properties": { + "quasiIds": { + "description": "Quasi-identifier columns. 
[required]", + "items": { + "$ref": "GooglePrivacyDlpV2beta1QuasiIdField" + }, + "type": "array" + }, + "relativeFrequency": { + "$ref": "GooglePrivacyDlpV2beta1FieldId", + "description": "The relative frequency column must contain a floating-point number\nbetween 0 and 1 (inclusive). Null values are assumed to be zero.\n[required]" + }, + "table": { + "$ref": "GooglePrivacyDlpV2beta1BigQueryTable", + "description": "Auxiliary table location. [required]" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1BigQueryOptions": { + "description": "Options defining BigQuery table and row identifiers.", + "id": "GooglePrivacyDlpV2beta1BigQueryOptions", + "properties": { + "identifyingFields": { + "description": "References to fields uniquely identifying rows within the table.\nNested fields in the format, like `person.birthdate.year`, are allowed.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1FieldId" + }, + "type": "array" + }, + "tableReference": { + "$ref": "GooglePrivacyDlpV2beta1BigQueryTable", + "description": "Complete BigQuery table reference." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1BigQueryTable": { + "description": "Message defining the location of a BigQuery table. A table is uniquely\nidentified by its project_id, dataset_id, and table_name. 
Within a query\na table is often referenced with a string in the format of:\n`\u003cproject_id\u003e:\u003cdataset_id\u003e.\u003ctable_id\u003e` or\n`\u003cproject_id\u003e.\u003cdataset_id\u003e.\u003ctable_id\u003e`.", + "id": "GooglePrivacyDlpV2beta1BigQueryTable", + "properties": { + "datasetId": { + "description": "Dataset ID of the table.", + "type": "string" + }, + "projectId": { + "description": "The Google Cloud Platform project ID of the project containing the table.\nIf omitted, project ID is inferred from the API call.", + "type": "string" + }, + "tableId": { + "description": "Name of the table.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1CategoricalStatsConfig": { + "description": "Compute numerical stats over an individual column, including\nnumber of distinct values and value count distribution.", + "id": "GooglePrivacyDlpV2beta1CategoricalStatsConfig", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2beta1FieldId", + "description": "Field to compute categorical stats on. All column types are\nsupported except for arrays and structs. However, it may be more\ninformative to use NumericalStats when the field type is supported,\ndepending on the data." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket": { + "description": "Histogram bucket of value frequencies in the column.", + "id": "GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket", + "properties": { + "bucketSize": { + "description": "Total number of records in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of value frequencies in this bucket. 
The total number of\nvalues returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1ValueFrequency" + }, + "type": "array" + }, + "valueFrequencyLowerBound": { + "description": "Lower bound on the value frequency of the values in this bucket.", + "format": "int64", + "type": "string" + }, + "valueFrequencyUpperBound": { + "description": "Upper bound on the value frequency of the values in this bucket.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1CategoricalStatsResult": { + "description": "Result of the categorical stats computation.", + "id": "GooglePrivacyDlpV2beta1CategoricalStatsResult", + "properties": { + "valueFrequencyHistogramBuckets": { + "description": "Histogram of value frequencies in the column.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1CloudStorageOptions": { + "description": "Options defining a file or a set of files (path ending with *) within\na Google Cloud Storage bucket.", + "id": "GooglePrivacyDlpV2beta1CloudStorageOptions", + "properties": { + "fileSet": { + "$ref": "GooglePrivacyDlpV2beta1FileSet" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1CloudStoragePath": { + "description": "A location in Cloud Storage.", + "id": "GooglePrivacyDlpV2beta1CloudStoragePath", + "properties": { + "path": { + "description": "The url, in the format of `gs://bucket/\u003cpath\u003e`.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1CustomInfoType": { + "description": "Custom information type provided by the user. Used to find domain-specific\nsensitive information configurable to the data in question.", + "id": "GooglePrivacyDlpV2beta1CustomInfoType", + "properties": { + "dictionary": { + "$ref": "GooglePrivacyDlpV2beta1Dictionary", + "description": "Dictionary-based custom info type." 
+ }, + "infoType": { + "$ref": "GooglePrivacyDlpV2beta1InfoType", + "description": "Info type configuration. All custom info types must have configurations\nthat do not conflict with built-in info types or other custom info types." + }, + "surrogateType": { + "$ref": "GooglePrivacyDlpV2beta1SurrogateType", + "description": "Surrogate info type." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1DatastoreOptions": { + "description": "Options defining a data set within Google Cloud Datastore.", + "id": "GooglePrivacyDlpV2beta1DatastoreOptions", + "properties": { + "kind": { + "$ref": "GooglePrivacyDlpV2beta1KindExpression", + "description": "The kind to process." + }, + "partitionId": { + "$ref": "GooglePrivacyDlpV2beta1PartitionId", + "description": "A partition ID identifies a grouping of entities. The grouping is always\nby project and namespace, however the namespace ID may be empty." + }, + "projection": { + "description": "Properties to scan. If none are specified, all properties will be scanned\nby default.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1Projection" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1Dictionary": { + "description": "Custom information type based on a dictionary of words or phrases. This can\nbe used to match sensitive information specific to the data, such as a list\nof employee IDs or job titles.\n\nDictionary words are case-insensitive and all characters other than letters\nand digits in the unicode [Basic Multilingual\nPlane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane)\nwill be replaced with whitespace when scanning for matches, so the\ndictionary phrase \"Sam Johnson\" will match all three phrases \"sam johnson\",\n\"Sam, Johnson\", and \"Sam (Johnson)\". Additionally, the characters\nsurrounding any match must be of a different type than the adjacent\ncharacters within the word, so letters must be next to non-letters and\ndigits next to non-digits. 
For example, the dictionary word \"jen\" will\nmatch the first three letters of the text \"jen123\" but will return no\nmatches for \"jennifer\".\n\nDictionary words containing a large number of characters that are not\nletters or digits may result in unexpected findings because such characters\nare treated as whitespace.", + "id": "GooglePrivacyDlpV2beta1Dictionary", + "properties": { + "wordList": { + "$ref": "GooglePrivacyDlpV2beta1WordList", + "description": "List of words or phrases to search for." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1EntityId": { + "description": "An entity in a dataset is a field or set of fields that correspond to a\nsingle person. For example, in medical records the `EntityId` might be\na patient identifier, or for financial records it might be an account\nidentifier. This message is used when generalizations or analysis must be\nconsistent across multiple rows pertaining to the same entity.", + "id": "GooglePrivacyDlpV2beta1EntityId", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2beta1FieldId", + "description": "Composite key indicating which field contains the entity identifier." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1FieldId": { + "description": "General identifier of a data field in a storage service.", + "id": "GooglePrivacyDlpV2beta1FieldId", + "properties": { + "columnName": { + "description": "Name describing the field.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1FileSet": { + "description": "Set of files to scan.", + "id": "GooglePrivacyDlpV2beta1FileSet", + "properties": { + "url": { + "description": "The url, in the format `gs://\u003cbucket\u003e/\u003cpath\u003e`. 
Trailing wildcard in the\npath is allowed.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1InfoType": { + "description": "Type of information detected by the API.", + "id": "GooglePrivacyDlpV2beta1InfoType", + "properties": { + "name": { + "description": "Name of the information type.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1InfoTypeLimit": { + "description": "Max findings configuration per info type, per content item or long running\noperation.", + "id": "GooglePrivacyDlpV2beta1InfoTypeLimit", + "properties": { + "infoType": { + "$ref": "GooglePrivacyDlpV2beta1InfoType", + "description": "Type of information the findings limit applies to. Only one limit per\ninfo_type should be provided. If InfoTypeLimit does not have an\ninfo_type, the DLP API applies the limit against all info_types that are\nfound but not specified in another InfoTypeLimit." + }, + "maxFindings": { + "description": "Max findings limit for the given infoType.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1InfoTypeStatistics": { + "description": "Statistics regarding a specific InfoType.", + "id": "GooglePrivacyDlpV2beta1InfoTypeStatistics", + "properties": { + "count": { + "description": "Number of findings for this info type.", + "format": "int64", + "type": "string" + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2beta1InfoType", + "description": "The type of finding this stat is for." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1InspectConfig": { + "description": "Configuration description of the scanning process.\nWhen used with redactContent only info_types and min_likelihood are currently\nused.", + "id": "GooglePrivacyDlpV2beta1InspectConfig", + "properties": { + "customInfoTypes": { + "description": "Custom info types provided by the user.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1CustomInfoType" + }, + "type": "array" + }, + "excludeTypes": { + "description": "When true, excludes type information of the findings.", + "type": "boolean" + }, + "includeQuote": { + "description": "When true, a contextual quote from the data that triggered a finding is\nincluded in the response; see Finding.quote.", + "type": "boolean" + }, + "infoTypeLimits": { + "description": "Configuration of findings limit given for specified info types.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1InfoTypeLimit" + }, + "type": "array" + }, + "infoTypes": { + "description": "Restricts what info_types to look for. The values must correspond to\nInfoType values returned by ListInfoTypes or found in documentation.\nEmpty info_types runs all enabled detectors.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1InfoType" + }, + "type": "array" + }, + "maxFindings": { + "description": "Limits the number of findings per content item or long running operation.", + "format": "int32", + "type": "integer" + }, + "minLikelihood": { + "description": "Only returns findings equal or above this threshold.", + "enum": [ + "LIKELIHOOD_UNSPECIFIED", + "VERY_UNLIKELY", + "UNLIKELY", + "POSSIBLE", + "LIKELY", + "VERY_LIKELY" + ], + "enumDescriptions": [ + "Default value; information with all likelihoods is included.", + "Few matching elements.", + "", + "Some matching elements.", + "", + "Many matching elements." 
+ ], + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1InspectOperationMetadata": { + "description": "Metadata returned within GetOperation for an inspect request.", + "id": "GooglePrivacyDlpV2beta1InspectOperationMetadata", + "properties": { + "createTime": { + "description": "The time which this request was started.", + "format": "google-datetime", + "type": "string" + }, + "infoTypeStats": { + "items": { + "$ref": "GooglePrivacyDlpV2beta1InfoTypeStatistics" + }, + "type": "array" + }, + "processedBytes": { + "description": "Total size in bytes that were processed.", + "format": "int64", + "type": "string" + }, + "requestInspectConfig": { + "$ref": "GooglePrivacyDlpV2beta1InspectConfig", + "description": "The inspect config used to create the Operation." + }, + "requestOutputConfig": { + "$ref": "GooglePrivacyDlpV2beta1OutputStorageConfig", + "description": "Optional location to store findings." + }, + "requestStorageConfig": { + "$ref": "GooglePrivacyDlpV2beta1StorageConfig", + "description": "The storage config used to create the Operation." + }, + "totalEstimatedBytes": { + "description": "Estimate of the number of bytes to process.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1InspectOperationResult": { + "description": "The operational data.", + "id": "GooglePrivacyDlpV2beta1InspectOperationResult", + "properties": { + "name": { + "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. 
If you use the default HTTP mapping, the\n`name` should have the format of `inspect/results/{id}`.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KAnonymityConfig": { + "description": "k-anonymity metric, used for analysis of reidentification risk.", + "id": "GooglePrivacyDlpV2beta1KAnonymityConfig", + "properties": { + "entityId": { + "$ref": "GooglePrivacyDlpV2beta1EntityId", + "description": "Optional message indicating that each distinct entity_id should not\ncontribute to the k-anonymity count more than once per equivalence class.\nIf an entity_id appears on several rows with different quasi-identifier\ntuples, it will contribute to each count exactly once.\n\nThis can lead to unexpected results. Consider a table where ID 1 is\nassociated to quasi-identifier \"foo\", ID 2 to \"bar\", and ID 3 to *both*\nquasi-identifiers \"foo\" and \"bar\" (on separate rows), and where this ID\nis used as entity_id. Then, the anonymity value associated to ID 3 will\nbe 2, even if it is the only ID to be associated to both values \"foo\" and\n\"bar\"." + }, + "quasiIds": { + "description": "Set of fields to compute k-anonymity over. When multiple fields are\nspecified, they are considered a single composite key. 
Structs and\nrepeated data types are not supported; however, nested fields are\nsupported so long as they are not structs themselves or nested within\na repeated field.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1FieldId" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass": { + "description": "The set of columns' values that share the same k-anonymity value.", + "id": "GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass", + "properties": { + "equivalenceClassSize": { + "description": "Size of the equivalence class, for example number of rows with the\nabove set of values.", + "format": "int64", + "type": "string" + }, + "quasiIdsValues": { + "description": "Set of values defining the equivalence class. One value per\nquasi-identifier column in the original KAnonymity metric message.\nThe order is always the same as the original request.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KAnonymityHistogramBucket": { + "description": "Histogram bucket of equivalence class sizes in the table.", + "id": "GooglePrivacyDlpV2beta1KAnonymityHistogramBucket", + "properties": { + "bucketSize": { + "description": "Total number of records in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of equivalence classes in this bucket. 
The total number of\nclasses returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass" + }, + "type": "array" + }, + "equivalenceClassSizeLowerBound": { + "description": "Lower bound on the size of the equivalence classes in this bucket.", + "format": "int64", + "type": "string" + }, + "equivalenceClassSizeUpperBound": { + "description": "Upper bound on the size of the equivalence classes in this bucket.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KAnonymityResult": { + "description": "Result of the k-anonymity computation.", + "id": "GooglePrivacyDlpV2beta1KAnonymityResult", + "properties": { + "equivalenceClassHistogramBuckets": { + "description": "Histogram of k-anonymity equivalence classes.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1KAnonymityHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KMapEstimationConfig": { + "description": "Reidentifiability metric. This corresponds to a risk model similar to what\nis called \"journalist risk\" in the literature, except the attack dataset is\nstatistically modeled instead of being perfectly known. This can be done\nusing publicly available data (like the US Census), or using a custom\nstatistical model (indicated as one or several BigQuery tables), or by\nextrapolating from the distribution of values in the input dataset.", + "id": "GooglePrivacyDlpV2beta1KMapEstimationConfig", + "properties": { + "auxiliaryTables": { + "description": "Several auxiliary tables can be used in the analysis. Each custom_tag\nused to tag a quasi-identifiers column must appear in exactly one column\nof one auxiliary table.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1AuxiliaryTable" + }, + "type": "array" + }, + "quasiIds": { + "description": "Fields considered to be quasi-identifiers. No two columns can have the\nsame tag. 
[required]", + "items": { + "$ref": "GooglePrivacyDlpV2beta1TaggedField" + }, + "type": "array" + }, + "regionCode": { + "description": "ISO 3166-1 alpha-2 region code to use in the statistical modeling.\nRequired if no column is tagged with a region-specific InfoType (like\nUS_ZIP_5) or a region code.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket": { + "description": "A KMapEstimationHistogramBucket message with the following values:\n min_anonymity: 3\n max_anonymity: 5\n frequency: 42\nmeans that there are 42 records whose quasi-identifier values correspond\nto 3, 4 or 5 people in the overlying population. An important particular\ncase is when min_anonymity = max_anonymity = 1: the frequency field then\ncorresponds to the number of uniquely identifiable records.", + "id": "GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket", + "properties": { + "bucketSize": { + "description": "Number of records within these anonymity bounds.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of quasi-identifier tuple values in this bucket. 
The total\nnumber of classes returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues" + }, + "type": "array" + }, + "maxAnonymity": { + "description": "Always greater than or equal to min_anonymity.", + "format": "int64", + "type": "string" + }, + "minAnonymity": { + "description": "Always positive.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues": { + "description": "A tuple of values for the quasi-identifier columns.", + "id": "GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues", + "properties": { + "estimatedAnonymity": { + "description": "The estimated anonymity for these quasi-identifier values.", + "format": "int64", + "type": "string" + }, + "quasiIdsValues": { + "description": "The quasi-identifier values.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KMapEstimationResult": { + "description": "Result of the reidentifiability analysis. Note that these results are an\nestimation, not exact values.", + "id": "GooglePrivacyDlpV2beta1KMapEstimationResult", + "properties": { + "kMapEstimationHistogram": { + "description": "The intervals [min_anonymity, max_anonymity] do not overlap. If a value\ndoesn't correspond to any such interval, the associated frequency is\nzero. 
For example, the following records:\n {min_anonymity: 1, max_anonymity: 1, frequency: 17}\n {min_anonymity: 2, max_anonymity: 3, frequency: 42}\n {min_anonymity: 5, max_anonymity: 10, frequency: 99}\nmean that there are no record with an estimated anonymity of 4, 5, or\nlarger than 10.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1KindExpression": { + "description": "A representation of a Datastore kind.", + "id": "GooglePrivacyDlpV2beta1KindExpression", + "properties": { + "name": { + "description": "The name of the kind.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1LDiversityConfig": { + "description": "l-diversity metric, used for analysis of reidentification risk.", + "id": "GooglePrivacyDlpV2beta1LDiversityConfig", + "properties": { + "quasiIds": { + "description": "Set of quasi-identifiers indicating how equivalence classes are\ndefined for the l-diversity computation. When multiple fields are\nspecified, they are considered a single composite key.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1FieldId" + }, + "type": "array" + }, + "sensitiveAttribute": { + "$ref": "GooglePrivacyDlpV2beta1FieldId", + "description": "Sensitive field for computing the l-value." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1LDiversityEquivalenceClass": { + "description": "The set of columns' values that share the same l-diversity value.", + "id": "GooglePrivacyDlpV2beta1LDiversityEquivalenceClass", + "properties": { + "equivalenceClassSize": { + "description": "Size of the k-anonymity equivalence class.", + "format": "int64", + "type": "string" + }, + "numDistinctSensitiveValues": { + "description": "Number of distinct sensitive values in this equivalence class.", + "format": "int64", + "type": "string" + }, + "quasiIdsValues": { + "description": "Quasi-identifier values defining the k-anonymity equivalence\nclass. 
The order is always the same as the original request.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1Value" + }, + "type": "array" + }, + "topSensitiveValues": { + "description": "Estimated frequencies of top sensitive values.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1ValueFrequency" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1LDiversityHistogramBucket": { + "description": "Histogram bucket of sensitive value frequencies in the table.", + "id": "GooglePrivacyDlpV2beta1LDiversityHistogramBucket", + "properties": { + "bucketSize": { + "description": "Total number of records in this bucket.", + "format": "int64", + "type": "string" + }, + "bucketValues": { + "description": "Sample of equivalence classes in this bucket. The total number of\nclasses returned per bucket is capped at 20.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1LDiversityEquivalenceClass" + }, + "type": "array" + }, + "sensitiveValueFrequencyLowerBound": { + "description": "Lower bound on the sensitive value frequencies of the equivalence\nclasses in this bucket.", + "format": "int64", + "type": "string" + }, + "sensitiveValueFrequencyUpperBound": { + "description": "Upper bound on the sensitive value frequencies of the equivalence\nclasses in this bucket.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1LDiversityResult": { + "description": "Result of the l-diversity computation.", + "id": "GooglePrivacyDlpV2beta1LDiversityResult", + "properties": { + "sensitiveValueFrequencyHistogramBuckets": { + "description": "Histogram of l-diversity equivalence class sensitive value frequencies.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1LDiversityHistogramBucket" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1NumericalStatsConfig": { + "description": "Compute numerical stats over an individual column, including\nmin, max, and quantiles.", + "id": 
"GooglePrivacyDlpV2beta1NumericalStatsConfig", + "properties": { + "field": { + "$ref": "GooglePrivacyDlpV2beta1FieldId", + "description": "Field to compute numerical stats on. Supported types are\ninteger, float, date, datetime, timestamp, time." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1NumericalStatsResult": { + "description": "Result of the numerical stats computation.", + "id": "GooglePrivacyDlpV2beta1NumericalStatsResult", + "properties": { + "maxValue": { + "$ref": "GooglePrivacyDlpV2beta1Value", + "description": "Maximum value appearing in the column." + }, + "minValue": { + "$ref": "GooglePrivacyDlpV2beta1Value", + "description": "Minimum value appearing in the column." + }, + "quantileValues": { + "description": "List of 99 values that partition the set of field values into 100 equal\nsized buckets.", + "items": { + "$ref": "GooglePrivacyDlpV2beta1Value" + }, + "type": "array" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1OutputStorageConfig": { + "description": "Cloud repository for storing output.", + "id": "GooglePrivacyDlpV2beta1OutputStorageConfig", + "properties": { + "storagePath": { + "$ref": "GooglePrivacyDlpV2beta1CloudStoragePath", + "description": "The path to a Google Cloud Storage location to store output.\nThe bucket must already exist and\nthe Google APIs service account for DLP must have write permission to\nwrite to the given bucket.\nResults are split over multiple csv files with each file name matching\nthe pattern \"[operation_id]_[count].csv\", for example\n`3094877188788974909_1.csv`. 
The `operation_id` matches the\nidentifier for the Operation, and the `count` is a counter used for\ntracking the number of files written.\n\nThe CSV file(s) contain the following columns regardless of storage type\nscanned:\n- id\n- info_type\n- likelihood\n- byte size of finding\n- quote\n- timestamp\n\nFor Cloud Storage the next columns are:\n\n- file_path\n- start_offset\n\nFor Cloud Datastore the next columns are:\n\n- project_id\n- namespace_id\n- path\n- column_name\n- offset\n\nFor BigQuery the next columns are:\n\n- row_number\n- project_id\n- dataset_id\n- table_id" + }, + "table": { + "$ref": "GooglePrivacyDlpV2beta1BigQueryTable", + "description": "Store findings in a new table in the dataset." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1PartitionId": { + "description": "Datastore partition ID.\nA partition ID identifies a grouping of entities. The grouping is always\nby project and namespace, however the namespace ID may be empty.\n\nA partition ID contains several dimensions:\nproject ID and namespace ID.", + "id": "GooglePrivacyDlpV2beta1PartitionId", + "properties": { + "namespaceId": { + "description": "If not empty, the ID of the namespace to which the entities belong.", + "type": "string" + }, + "projectId": { + "description": "The ID of the project to which the entities belong.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1PrivacyMetric": { + "description": "Privacy metric to compute for reidentification risk analysis.", + "id": "GooglePrivacyDlpV2beta1PrivacyMetric", + "properties": { + "categoricalStatsConfig": { + "$ref": "GooglePrivacyDlpV2beta1CategoricalStatsConfig" + }, + "kAnonymityConfig": { + "$ref": "GooglePrivacyDlpV2beta1KAnonymityConfig" + }, + "kMapEstimationConfig": { + "$ref": "GooglePrivacyDlpV2beta1KMapEstimationConfig" + }, + "lDiversityConfig": { + "$ref": "GooglePrivacyDlpV2beta1LDiversityConfig" + }, + "numericalStatsConfig": { + "$ref": 
"GooglePrivacyDlpV2beta1NumericalStatsConfig" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1Projection": { + "description": "A representation of a Datastore property in a projection.", + "id": "GooglePrivacyDlpV2beta1Projection", + "properties": { + "property": { + "$ref": "GooglePrivacyDlpV2beta1PropertyReference", + "description": "The property to project." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1PropertyReference": { + "description": "A reference to a property relative to the Datastore kind expressions.", + "id": "GooglePrivacyDlpV2beta1PropertyReference", + "properties": { + "name": { + "description": "The name of the property.\nIf name includes \".\"s, it may be interpreted as a property name path.", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1QuasiIdField": { + "description": "A quasi-identifier column has a custom_tag, used to know which column\nin the data corresponds to which column in the statistical model.", + "id": "GooglePrivacyDlpV2beta1QuasiIdField", + "properties": { + "customTag": { + "type": "string" + }, + "field": { + "$ref": "GooglePrivacyDlpV2beta1FieldId" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1RiskAnalysisOperationMetadata": { + "description": "Metadata returned within the\n[`riskAnalysis.operations.get`](/dlp/docs/reference/rest/v2beta1/riskAnalysis.operations/get)\nfor risk analysis.", + "id": "GooglePrivacyDlpV2beta1RiskAnalysisOperationMetadata", + "properties": { + "createTime": { + "description": "The time which this request was started.", + "format": "google-datetime", + "type": "string" + }, + "requestedPrivacyMetric": { + "$ref": "GooglePrivacyDlpV2beta1PrivacyMetric", + "description": "Privacy metric to compute." + }, + "requestedSourceTable": { + "$ref": "GooglePrivacyDlpV2beta1BigQueryTable", + "description": "Input dataset to compute metrics over." 
+ } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1RiskAnalysisOperationResult": { + "description": "Result of a risk analysis\n[`Operation`](/dlp/docs/reference/rest/v2beta1/inspect.operations)\nrequest.", + "id": "GooglePrivacyDlpV2beta1RiskAnalysisOperationResult", + "properties": { + "categoricalStatsResult": { + "$ref": "GooglePrivacyDlpV2beta1CategoricalStatsResult" + }, + "kAnonymityResult": { + "$ref": "GooglePrivacyDlpV2beta1KAnonymityResult" + }, + "kMapEstimationResult": { + "$ref": "GooglePrivacyDlpV2beta1KMapEstimationResult" + }, + "lDiversityResult": { + "$ref": "GooglePrivacyDlpV2beta1LDiversityResult" + }, + "numericalStatsResult": { + "$ref": "GooglePrivacyDlpV2beta1NumericalStatsResult" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1StorageConfig": { + "description": "Shared message indicating Cloud storage type.", + "id": "GooglePrivacyDlpV2beta1StorageConfig", + "properties": { + "bigQueryOptions": { + "$ref": "GooglePrivacyDlpV2beta1BigQueryOptions", + "description": "BigQuery options specification." + }, + "cloudStorageOptions": { + "$ref": "GooglePrivacyDlpV2beta1CloudStorageOptions", + "description": "Google Cloud Storage options specification." + }, + "datastoreOptions": { + "$ref": "GooglePrivacyDlpV2beta1DatastoreOptions", + "description": "Google Cloud Datastore options specification." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1SurrogateType": { + "description": "Message for detecting output from deidentification transformations\nsuch as\n[`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2beta1/content/deidentify#CryptoReplaceFfxFpeConfig).\nThese types of transformations are\nthose that perform pseudonymization, thereby producing a \"surrogate\" as\noutput. This should be used in conjunction with a field on the\ntransformation such as `surrogate_info_type`. 
This custom info type does\nnot support the use of `detection_rules`.", + "id": "GooglePrivacyDlpV2beta1SurrogateType", + "properties": {}, + "type": "object" + }, + "GooglePrivacyDlpV2beta1TaggedField": { + "description": "A column with a semantic tag attached.", + "id": "GooglePrivacyDlpV2beta1TaggedField", + "properties": { + "customTag": { + "description": "A column can be tagged with a custom tag. In this case, the user must\nindicate an auxiliary table that contains statistical information on\nthe possible values of this column (below).", + "type": "string" + }, + "field": { + "$ref": "GooglePrivacyDlpV2beta1FieldId", + "description": "Identifies the column. [required]" + }, + "inferred": { + "$ref": "GoogleProtobufEmpty", + "description": "If no semantic tag is indicated, we infer the statistical model from\nthe distribution of values in the input data" + }, + "infoType": { + "$ref": "GooglePrivacyDlpV2beta1InfoType", + "description": "A column can be tagged with a InfoType to use the relevant public\ndataset as a statistical model of population, if available. We\ncurrently support US ZIP codes, region codes, ages and genders." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1Value": { + "description": "Set of primitive values supported by the system.\nNote that for the purposes of inspection or transformation, the number\nof bytes considered to comprise a 'Value' is based on its representation\nas a UTF-8 encoded string. 
For example, if 'integer_value' is set to\n123456789, the number of bytes would be counted as 9, even though an\nint64 only holds up to 8 bytes of data.", + "id": "GooglePrivacyDlpV2beta1Value", + "properties": { + "booleanValue": { + "type": "boolean" + }, + "dateValue": { + "$ref": "GoogleTypeDate" + }, + "floatValue": { + "format": "double", + "type": "number" + }, + "integerValue": { + "format": "int64", + "type": "string" + }, + "stringValue": { + "type": "string" + }, + "timeValue": { + "$ref": "GoogleTypeTimeOfDay" + }, + "timestampValue": { + "format": "google-datetime", + "type": "string" + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1ValueFrequency": { + "description": "A value of a field, including its frequency.", + "id": "GooglePrivacyDlpV2beta1ValueFrequency", + "properties": { + "count": { + "description": "How many times the value is contained in the field.", + "format": "int64", + "type": "string" + }, + "value": { + "$ref": "GooglePrivacyDlpV2beta1Value", + "description": "A value contained in the field in question." + } + }, + "type": "object" + }, + "GooglePrivacyDlpV2beta1WordList": { + "description": "Message defining a list of words or phrases to search for in the data.", + "id": "GooglePrivacyDlpV2beta1WordList", + "properties": { + "words": { + "description": "Words or phrases defining the dictionary. The dictionary must contain\nat least one phrase and every phrase must contain at least 2 characters\nthat are letters or digits. [required]", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleProtobufEmpty": { + "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "id": "GoogleProtobufEmpty", + "properties": {}, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. 
If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", + "id": "GoogleRpcStatus", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, + "GoogleTypeDate": { + "description": "Represents a whole calendar date, e.g. date of birth. The time of day and\ntime zone are either specified elsewhere or are not significant. The date\nis relative to the Proleptic Gregorian Calendar. The day may be 0 to\nrepresent a year and month where the day is not significant, e.g. credit card\nexpiration date. 
The year may be 0 to represent a month and day independent\nof year, e.g. anniversary date. Related types are google.type.TimeOfDay\nand `google.protobuf.Timestamp`.", + "id": "GoogleTypeDate", + "properties": { + "day": { + "description": "Day of month. Must be from 1 to 31 and valid for the year and month, or 0\nif specifying a year/month where the day is not significant.", + "format": "int32", + "type": "integer" + }, + "month": { + "description": "Month of year. Must be from 1 to 12.", + "format": "int32", + "type": "integer" + }, + "year": { + "description": "Year of date. Must be from 1 to 9999, or 0 if specifying a date without\na year.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "GoogleTypeTimeOfDay": { + "description": "Represents a time of day. The date and time zone are either not significant\nor are specified elsewhere. An API may choose to allow leap seconds. Related\ntypes are google.type.Date and `google.protobuf.Timestamp`.", + "id": "GoogleTypeTimeOfDay", + "properties": { + "hours": { + "description": "Hours of day in 24 hour format. Should be from 0 to 23. An API may choose\nto allow the value \"24:00:00\" for scenarios like business closing time.", + "format": "int32", + "type": "integer" + }, + "minutes": { + "description": "Minutes of hour of day. Must be from 0 to 59.", + "format": "int32", + "type": "integer" + }, + "nanos": { + "description": "Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.", + "format": "int32", + "type": "integer" + }, + "seconds": { + "description": "Seconds of minutes of the time. Must normally be from 0 to 59. 
An API may\nallow the value 60 if it allows leap-seconds.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + } + }, + "servicePath": "", + "title": "Cloud Data Loss Prevention (DLP) API", + "version": "v2", + "version_module": true +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/dlp/v2/dlp-gen.go b/vendor/google.golang.org/api/dlp/v2/dlp-gen.go new file mode 100644 index 000000000..231d29e3a --- /dev/null +++ b/vendor/google.golang.org/api/dlp/v2/dlp-gen.go @@ -0,0 +1,13276 @@ +// Package dlp provides access to the Cloud Data Loss Prevention (DLP) API. +// +// See https://cloud.google.com/dlp/docs/ +// +// Usage example: +// +// import "google.golang.org/api/dlp/v2" +// ... +// dlpService, err := dlp.New(oauthHttpClient) +package dlp // import "google.golang.org/api/dlp/v2" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + context "golang.org/x/net/context" + ctxhttp "golang.org/x/net/context/ctxhttp" + gensupport "google.golang.org/api/gensupport" + googleapi "google.golang.org/api/googleapi" + "io" + "net/http" + "net/url" + "strconv" + "strings" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = ctxhttp.Do + +const apiId = "dlp:v2" +const apiName = "dlp" +const apiVersion = "v2" +const basePath = "https://dlp.googleapis.com/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" +) + +func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.InfoTypes = NewInfoTypesService(s) + s.Organizations = NewOrganizationsService(s) + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + InfoTypes *InfoTypesService + + Organizations *OrganizationsService + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewInfoTypesService(s *Service) *InfoTypesService { + rs := &InfoTypesService{s: s} + return rs +} + +type InfoTypesService struct { + s *Service +} + +func NewOrganizationsService(s *Service) *OrganizationsService { + rs := &OrganizationsService{s: s} + rs.DeidentifyTemplates = NewOrganizationsDeidentifyTemplatesService(s) + rs.InspectTemplates = NewOrganizationsInspectTemplatesService(s) + return rs +} + +type OrganizationsService struct { + s *Service + + DeidentifyTemplates *OrganizationsDeidentifyTemplatesService + + InspectTemplates *OrganizationsInspectTemplatesService +} + +func NewOrganizationsDeidentifyTemplatesService(s *Service) *OrganizationsDeidentifyTemplatesService { + rs := &OrganizationsDeidentifyTemplatesService{s: s} + return rs +} + +type OrganizationsDeidentifyTemplatesService struct { + s *Service +} + +func NewOrganizationsInspectTemplatesService(s *Service) *OrganizationsInspectTemplatesService { + rs := &OrganizationsInspectTemplatesService{s: s} + return rs +} + +type OrganizationsInspectTemplatesService struct { + s *Service +} + +func NewProjectsService(s *Service) 
*ProjectsService { + rs := &ProjectsService{s: s} + rs.Content = NewProjectsContentService(s) + rs.DeidentifyTemplates = NewProjectsDeidentifyTemplatesService(s) + rs.DlpJobs = NewProjectsDlpJobsService(s) + rs.Image = NewProjectsImageService(s) + rs.InspectTemplates = NewProjectsInspectTemplatesService(s) + rs.JobTriggers = NewProjectsJobTriggersService(s) + return rs +} + +type ProjectsService struct { + s *Service + + Content *ProjectsContentService + + DeidentifyTemplates *ProjectsDeidentifyTemplatesService + + DlpJobs *ProjectsDlpJobsService + + Image *ProjectsImageService + + InspectTemplates *ProjectsInspectTemplatesService + + JobTriggers *ProjectsJobTriggersService +} + +func NewProjectsContentService(s *Service) *ProjectsContentService { + rs := &ProjectsContentService{s: s} + return rs +} + +type ProjectsContentService struct { + s *Service +} + +func NewProjectsDeidentifyTemplatesService(s *Service) *ProjectsDeidentifyTemplatesService { + rs := &ProjectsDeidentifyTemplatesService{s: s} + return rs +} + +type ProjectsDeidentifyTemplatesService struct { + s *Service +} + +func NewProjectsDlpJobsService(s *Service) *ProjectsDlpJobsService { + rs := &ProjectsDlpJobsService{s: s} + return rs +} + +type ProjectsDlpJobsService struct { + s *Service +} + +func NewProjectsImageService(s *Service) *ProjectsImageService { + rs := &ProjectsImageService{s: s} + return rs +} + +type ProjectsImageService struct { + s *Service +} + +func NewProjectsInspectTemplatesService(s *Service) *ProjectsInspectTemplatesService { + rs := &ProjectsInspectTemplatesService{s: s} + return rs +} + +type ProjectsInspectTemplatesService struct { + s *Service +} + +func NewProjectsJobTriggersService(s *Service) *ProjectsJobTriggersService { + rs := &ProjectsJobTriggersService{s: s} + return rs +} + +type ProjectsJobTriggersService struct { + s *Service +} + +// GooglePrivacyDlpV2Action: A task to execute on the completion of a +// job. 
+type GooglePrivacyDlpV2Action struct { + // PubSub: Publish a notification to a pubsub topic. + PubSub *GooglePrivacyDlpV2PublishToPubSub `json:"pubSub,omitempty"` + + // SaveFindings: Save resulting findings in a provided location. + SaveFindings *GooglePrivacyDlpV2SaveFindings `json:"saveFindings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PubSub") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PubSub") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Action) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Action + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails: Result of a risk +// analysis operation request. 
+type GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails struct { + CategoricalStatsResult *GooglePrivacyDlpV2CategoricalStatsResult `json:"categoricalStatsResult,omitempty"` + + KAnonymityResult *GooglePrivacyDlpV2KAnonymityResult `json:"kAnonymityResult,omitempty"` + + KMapEstimationResult *GooglePrivacyDlpV2KMapEstimationResult `json:"kMapEstimationResult,omitempty"` + + LDiversityResult *GooglePrivacyDlpV2LDiversityResult `json:"lDiversityResult,omitempty"` + + NumericalStatsResult *GooglePrivacyDlpV2NumericalStatsResult `json:"numericalStatsResult,omitempty"` + + // RequestedPrivacyMetric: Privacy metric to compute. + RequestedPrivacyMetric *GooglePrivacyDlpV2PrivacyMetric `json:"requestedPrivacyMetric,omitempty"` + + // RequestedSourceTable: Input dataset to compute metrics over. + RequestedSourceTable *GooglePrivacyDlpV2BigQueryTable `json:"requestedSourceTable,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "CategoricalStatsResult") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CategoricalStatsResult") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2AuxiliaryTable: An auxiliary table contains +// statistical information on the relative +// frequency of different quasi-identifiers values. It has one or +// several +// quasi-identifiers columns, and one column that indicates the +// relative +// frequency of each quasi-identifier tuple. +// If a tuple is present in the data but not in the auxiliary table, +// the +// corresponding relative frequency is assumed to be zero (and thus, +// the +// tuple is highly reidentifiable). +type GooglePrivacyDlpV2AuxiliaryTable struct { + // QuasiIds: Quasi-identifier columns. [required] + QuasiIds []*GooglePrivacyDlpV2QuasiIdField `json:"quasiIds,omitempty"` + + // RelativeFrequency: The relative frequency column must contain a + // floating-point number + // between 0 and 1 (inclusive). Null values are assumed to be + // zero. + // [required] + RelativeFrequency *GooglePrivacyDlpV2FieldId `json:"relativeFrequency,omitempty"` + + // Table: Auxiliary table location. [required] + Table *GooglePrivacyDlpV2BigQueryTable `json:"table,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuasiIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuasiIds") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2AuxiliaryTable) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2AuxiliaryTable + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2BigQueryKey: Row key for identifying a record in +// BigQuery table. +type GooglePrivacyDlpV2BigQueryKey struct { + // RowNumber: Absolute number of the row from the beginning of the table + // at the time + // of scanning. + RowNumber int64 `json:"rowNumber,omitempty,string"` + + // TableReference: Complete BigQuery table reference. + TableReference *GooglePrivacyDlpV2BigQueryTable `json:"tableReference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RowNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RowNumber") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2BigQueryKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2BigQueryKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2BigQueryOptions: Options defining BigQuery table +// and row identifiers. +type GooglePrivacyDlpV2BigQueryOptions struct { + // IdentifyingFields: References to fields uniquely identifying rows + // within the table. + // Nested fields in the format, like `person.birthdate.year`, are + // allowed. + IdentifyingFields []*GooglePrivacyDlpV2FieldId `json:"identifyingFields,omitempty"` + + // TableReference: Complete BigQuery table reference. + TableReference *GooglePrivacyDlpV2BigQueryTable `json:"tableReference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IdentifyingFields") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IdentifyingFields") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2BigQueryOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2BigQueryOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2BigQueryTable: Message defining the location of a +// BigQuery table. A table is uniquely +// identified by its project_id, dataset_id, and table_name. Within a +// query +// a table is often referenced with a string in the format +// of: +// `:.` +// or +// `..`. +type GooglePrivacyDlpV2BigQueryTable struct { + // DatasetId: Dataset ID of the table. + DatasetId string `json:"datasetId,omitempty"` + + // ProjectId: The Google Cloud Platform project ID of the project + // containing the table. + // If omitted, project ID is inferred from the API call. + ProjectId string `json:"projectId,omitempty"` + + // TableId: Name of the table. + TableId string `json:"tableId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DatasetId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DatasetId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2BigQueryTable) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2BigQueryTable + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2BoundingBox: Bounding box encompassing detected +// text within an image. +type GooglePrivacyDlpV2BoundingBox struct { + // Height: Height of the bounding box in pixels. + Height int64 `json:"height,omitempty"` + + // Left: Left coordinate of the bounding box. (0,0) is upper left. + Left int64 `json:"left,omitempty"` + + // Top: Top coordinate of the bounding box. (0,0) is upper left. + Top int64 `json:"top,omitempty"` + + // Width: Width of the bounding box in pixels. + Width int64 `json:"width,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Height") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Height") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2BoundingBox) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2BoundingBox + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Bucket: Bucket is represented as a range, along +// with replacement values. +type GooglePrivacyDlpV2Bucket struct { + // Max: Upper bound of the range, exclusive; type must match min. + Max *GooglePrivacyDlpV2Value `json:"max,omitempty"` + + // Min: Lower bound of the range, inclusive. Type should be the same as + // max if + // used. + Min *GooglePrivacyDlpV2Value `json:"min,omitempty"` + + // ReplacementValue: Replacement value for this bucket. If not + // provided + // the default behavior will be to hyphenate the min-max range. + ReplacementValue *GooglePrivacyDlpV2Value `json:"replacementValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Max") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Max") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Bucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Bucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2BucketingConfig: Generalization function that +// buckets values based on ranges. The ranges and +// replacement values are dynamically provided by the user for custom +// behavior, +// such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH +// This can be used on +// data of type: number, long, string, timestamp. +// If the bound `Value` type differs from the type of data being +// transformed, we +// will first attempt converting the type of the data to be transformed +// to match +// the type of the bound before comparing. +type GooglePrivacyDlpV2BucketingConfig struct { + // Buckets: Set of buckets. Ranges must be non-overlapping. + Buckets []*GooglePrivacyDlpV2Bucket `json:"buckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Buckets") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Buckets") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2BucketingConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2BucketingConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ByteContentItem: Container for bytes to inspect or +// redact. +type GooglePrivacyDlpV2ByteContentItem struct { + // Data: Content data to inspect or redact. + Data string `json:"data,omitempty"` + + // Type: The type of data stored in the bytes string. Default will be + // TEXT_UTF8. + // + // Possible values: + // "BYTES_TYPE_UNSPECIFIED" + // "IMAGE_JPEG" + // "IMAGE_BMP" + // "IMAGE_PNG" + // "IMAGE_SVG" + // "TEXT_UTF8" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Data") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Data") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ByteContentItem) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ByteContentItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CancelDlpJobRequest: The request message for +// canceling a DLP job. 
+type GooglePrivacyDlpV2CancelDlpJobRequest struct { +} + +// GooglePrivacyDlpV2CategoricalStatsConfig: Compute numerical stats +// over an individual column, including +// number of distinct values and value count distribution. +type GooglePrivacyDlpV2CategoricalStatsConfig struct { + // Field: Field to compute categorical stats on. All column types + // are + // supported except for arrays and structs. However, it may be + // more + // informative to use NumericalStats when the field type is + // supported, + // depending on the data. + Field *GooglePrivacyDlpV2FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CategoricalStatsConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CategoricalStatsConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2CategoricalStatsHistogramBucket struct { + // BucketSize: Total number of values in this bucket. 
+ BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValueCount: Total number of distinct values in this bucket. + BucketValueCount int64 `json:"bucketValueCount,omitempty,string"` + + // BucketValues: Sample of value frequencies in this bucket. The total + // number of + // values returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2ValueFrequency `json:"bucketValues,omitempty"` + + // ValueFrequencyLowerBound: Lower bound on the value frequency of the + // values in this bucket. + ValueFrequencyLowerBound int64 `json:"valueFrequencyLowerBound,omitempty,string"` + + // ValueFrequencyUpperBound: Upper bound on the value frequency of the + // values in this bucket. + ValueFrequencyUpperBound int64 `json:"valueFrequencyUpperBound,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CategoricalStatsHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CategoricalStatsHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CategoricalStatsResult: Result of the categorical +// stats computation. +type GooglePrivacyDlpV2CategoricalStatsResult struct { + // ValueFrequencyHistogramBuckets: Histogram of value frequencies in the + // column. + ValueFrequencyHistogramBuckets []*GooglePrivacyDlpV2CategoricalStatsHistogramBucket `json:"valueFrequencyHistogramBuckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ValueFrequencyHistogramBuckets") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "ValueFrequencyHistogramBuckets") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CategoricalStatsResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CategoricalStatsResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CharacterMaskConfig: Partially mask a string by +// replacing a given number of characters with a +// fixed character. Masking can start from the beginning or end of the +// string. +// This can be used on data of any type (numbers, longs, and so on) and +// when +// de-identifying structured data we'll attempt to preserve the original +// data's +// type. (This allows you to take a long like 123 and modify it to a +// string like +// **3. +type GooglePrivacyDlpV2CharacterMaskConfig struct { + // CharactersToIgnore: When masking a string, items in this list will be + // skipped when replacing. + // For example, if your string is 555-555-5555 and you ask us to skip + // `-` and + // mask 5 chars with * we would produce ***-*55-5555. + CharactersToIgnore []*GooglePrivacyDlpV2CharsToIgnore `json:"charactersToIgnore,omitempty"` + + // MaskingCharacter: Character to mask the sensitive values—for + // example, "*" for an + // alphabetic string such as name, or "0" for a numeric string such as + // ZIP + // code or credit card number. String must have length 1. If not + // supplied, we + // will default to "*" for strings, 0 for digits. + MaskingCharacter string `json:"maskingCharacter,omitempty"` + + // NumberToMask: Number of characters to mask. If not set, all matching + // chars will be + // masked. Skipped characters do not count towards this tally. + NumberToMask int64 `json:"numberToMask,omitempty"` + + // ReverseOrder: Mask characters in reverse order. 
For example, if + // `masking_character` is + // '0', number_to_mask is 14, and `reverse_order` is false, + // then + // 1234-5678-9012-3456 -> 00000000000000-3456 + // If `masking_character` is '*', `number_to_mask` is 3, and + // `reverse_order` + // is true, then 12345 -> 12*** + ReverseOrder bool `json:"reverseOrder,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CharactersToIgnore") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CharactersToIgnore") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CharacterMaskConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CharacterMaskConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CharsToIgnore: Characters to skip when doing +// deidentification of a value. These will be left +// alone and skipped. 
+type GooglePrivacyDlpV2CharsToIgnore struct { + CharactersToSkip string `json:"charactersToSkip,omitempty"` + + // Possible values: + // "COMMON_CHARS_TO_IGNORE_UNSPECIFIED" + // "NUMERIC" - 0-9 + // "ALPHA_UPPER_CASE" - A-Z + // "ALPHA_LOWER_CASE" - a-z + // "PUNCTUATION" - US Punctuation, one of + // !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ + // "WHITESPACE" - Whitespace character, one of [ \t\n\x0B\f\r] + CommonCharactersToIgnore string `json:"commonCharactersToIgnore,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CharactersToSkip") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CharactersToSkip") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CharsToIgnore) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CharsToIgnore + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CloudStorageOptions: Options defining a file or a +// set of files (path ending with *) within +// a Google Cloud Storage bucket. +type GooglePrivacyDlpV2CloudStorageOptions struct { + // BytesLimitPerFile: Max number of bytes to scan from a file. 
If a + // scanned file's size is bigger + // than this value then the rest of the bytes are omitted. + BytesLimitPerFile int64 `json:"bytesLimitPerFile,omitempty,string"` + + FileSet *GooglePrivacyDlpV2FileSet `json:"fileSet,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BytesLimitPerFile") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BytesLimitPerFile") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CloudStorageOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CloudStorageOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Color: Represents a color in the RGB color space. +type GooglePrivacyDlpV2Color struct { + // Blue: The amount of blue in the color as a value in the interval [0, + // 1]. + Blue float64 `json:"blue,omitempty"` + + // Green: The amount of green in the color as a value in the interval + // [0, 1]. + Green float64 `json:"green,omitempty"` + + // Red: The amount of red in the color as a value in the interval [0, + // 1]. + Red float64 `json:"red,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Blue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Blue") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Color) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Color + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GooglePrivacyDlpV2Color) UnmarshalJSON(data []byte) error { + type NoMethod GooglePrivacyDlpV2Color + var s1 struct { + Blue gensupport.JSONFloat64 `json:"blue"` + Green gensupport.JSONFloat64 `json:"green"` + Red gensupport.JSONFloat64 `json:"red"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.Blue = float64(s1.Blue) + s.Green = float64(s1.Green) + s.Red = float64(s1.Red) + return nil +} + +// GooglePrivacyDlpV2Condition: The field type of `value` and `field` do +// not need to match to be +// considered equal, but not all comparisons are possible. +// +// A `value` of type: +// +// - `string` can be compared against all other types +// - `boolean` can only be compared against other booleans +// - `integer` can be compared against doubles or a string if the string +// value +// can be parsed as an integer. 
+// - `double` can be compared against integers or a string if the string +// can +// be parsed as a double. +// - `Timestamp` can be compared against strings in RFC 3339 date +// string +// format. +// - `TimeOfDay` can be compared against timestamps and strings in the +// format +// of 'HH:mm:ss'. +// +// If we fail to compare do to type mismatch, a warning will be given +// and +// the condition will evaluate to false. +type GooglePrivacyDlpV2Condition struct { + // Field: Field within the record this condition is evaluated against. + // [required] + Field *GooglePrivacyDlpV2FieldId `json:"field,omitempty"` + + // Operator: Operator used to compare the field or infoType to the + // value. [required] + // + // Possible values: + // "RELATIONAL_OPERATOR_UNSPECIFIED" + // "EQUAL_TO" - Equal. + // "NOT_EQUAL_TO" - Not equal to. + // "GREATER_THAN" - Greater than. + // "LESS_THAN" - Less than. + // "GREATER_THAN_OR_EQUALS" - Greater than or equals. + // "LESS_THAN_OR_EQUALS" - Less than or equals. + // "EXISTS" - Exists + Operator string `json:"operator,omitempty"` + + // Value: Value to compare against. [Required, except for `EXISTS` + // tests.] + Value *GooglePrivacyDlpV2Value `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Condition) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Condition + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Conditions: A collection of conditions. +type GooglePrivacyDlpV2Conditions struct { + Conditions []*GooglePrivacyDlpV2Condition `json:"conditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Conditions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Conditions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Conditions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Conditions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ContentItem: Container structure for the content to +// inspect. +type GooglePrivacyDlpV2ContentItem struct { + // ByteItem: Content data to inspect or redact. Replaces `type` and + // `data`. 
+ ByteItem *GooglePrivacyDlpV2ByteContentItem `json:"byteItem,omitempty"` + + // Table: Structured content for inspection. + Table *GooglePrivacyDlpV2Table `json:"table,omitempty"` + + // Value: String data to inspect or redact. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ByteItem") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ByteItem") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ContentItem) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ContentItem + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ContentLocation: Findings container location data. +type GooglePrivacyDlpV2ContentLocation struct { + // ContainerName: Name of the container where the finding is + // located. + // The top level name is the source file name or table name. Nested + // names + // could be absent if the embedded object has no string identifier + // (for an example an image contained within a document). + ContainerName string `json:"containerName,omitempty"` + + // ContainerTimestamp: Findings container modification timestamp, if + // applicable. 
+ // For Google Cloud Storage contains last file modification + // timestamp. + // For BigQuery table contains last_modified_time property. + // For Datastore - not populated. + ContainerTimestamp string `json:"containerTimestamp,omitempty"` + + // ContainerVersion: Findings container version, if + // available + // ("generation" for Google Cloud Storage). + ContainerVersion string `json:"containerVersion,omitempty"` + + // DocumentLocation: Location data for document files. + DocumentLocation *GooglePrivacyDlpV2DocumentLocation `json:"documentLocation,omitempty"` + + // ImageLocation: Location within an image's pixels. + ImageLocation *GooglePrivacyDlpV2ImageLocation `json:"imageLocation,omitempty"` + + // RecordLocation: Location within a row or record of a database table. + RecordLocation *GooglePrivacyDlpV2RecordLocation `json:"recordLocation,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContainerName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContainerName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ContentLocation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ContentLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CreateDeidentifyTemplateRequest: Request message +// for CreateDeidentifyTemplate. +type GooglePrivacyDlpV2CreateDeidentifyTemplateRequest struct { + // DeidentifyTemplate: The DeidentifyTemplate to create. + DeidentifyTemplate *GooglePrivacyDlpV2DeidentifyTemplate `json:"deidentifyTemplate,omitempty"` + + // TemplateId: The template id can contain uppercase and lowercase + // letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + TemplateId string `json:"templateId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeidentifyTemplate") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeidentifyTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CreateDeidentifyTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CreateDeidentifyTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CreateDlpJobRequest: Request message for +// CreateDlpJobRequest. Used to initiate long running +// jobs such as calculating risk metrics or inspecting Google +// Cloud +// Storage. +type GooglePrivacyDlpV2CreateDlpJobRequest struct { + InspectJob *GooglePrivacyDlpV2InspectJobConfig `json:"inspectJob,omitempty"` + + // JobId: The job id can contain uppercase and lowercase + // letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + JobId string `json:"jobId,omitempty"` + + RiskJob *GooglePrivacyDlpV2RiskAnalysisJobConfig `json:"riskJob,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InspectJob") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InspectJob") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CreateDlpJobRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CreateDlpJobRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CreateInspectTemplateRequest: Request message for +// CreateInspectTemplate. +type GooglePrivacyDlpV2CreateInspectTemplateRequest struct { + // InspectTemplate: The InspectTemplate to create. + InspectTemplate *GooglePrivacyDlpV2InspectTemplate `json:"inspectTemplate,omitempty"` + + // TemplateId: The template id can contain uppercase and lowercase + // letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + TemplateId string `json:"templateId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InspectTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InspectTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CreateInspectTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CreateInspectTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CreateJobTriggerRequest: Request message for +// CreateJobTrigger. +type GooglePrivacyDlpV2CreateJobTriggerRequest struct { + // JobTrigger: The JobTrigger to create. + JobTrigger *GooglePrivacyDlpV2JobTrigger `json:"jobTrigger,omitempty"` + + // TriggerId: The trigger id can contain uppercase and lowercase + // letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + TriggerId string `json:"triggerId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "JobTrigger") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "JobTrigger") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CreateJobTriggerRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CreateJobTriggerRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CryptoHashConfig: Pseudonymization method that +// generates surrogates via cryptographic hashing. +// Uses SHA-256. +// The key size must be either 32 or 64 bytes. +// Outputs a 32 byte digest as an uppercase hex string +// (for example, 41D1567F7F99F1DC2A5FAB886DEE5BEE). +// Currently, only string and integer values can be hashed. +type GooglePrivacyDlpV2CryptoHashConfig struct { + // CryptoKey: The key used by the hash function. + CryptoKey *GooglePrivacyDlpV2CryptoKey `json:"cryptoKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CryptoKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CryptoKey") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CryptoHashConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CryptoHashConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CryptoKey: This is a data encryption key (DEK) (as +// opposed to +// a key encryption key (KEK) stored by KMS). +// When using KMS to wrap/unwrap DEKs, be sure to set an appropriate +// IAM policy on the KMS CryptoKey (KEK) to ensure an attacker +// cannot +// unwrap the data crypto key. +type GooglePrivacyDlpV2CryptoKey struct { + KmsWrapped *GooglePrivacyDlpV2KmsWrappedCryptoKey `json:"kmsWrapped,omitempty"` + + Transient *GooglePrivacyDlpV2TransientCryptoKey `json:"transient,omitempty"` + + Unwrapped *GooglePrivacyDlpV2UnwrappedCryptoKey `json:"unwrapped,omitempty"` + + // ForceSendFields is a list of field names (e.g. "KmsWrapped") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KmsWrapped") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CryptoKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CryptoKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig: Replaces an identifier +// with a surrogate using FPE with the FFX +// mode of operation; however when used in the `ReidentifyContent` API +// method, +// it serves the opposite function by reversing the surrogate back +// into +// the original identifier. +// The identifier must be encoded as ASCII. +// For a given crypto key and context, the same identifier will +// be +// replaced with the same surrogate. +// Identifiers must be at least two characters long. +// In the case that the identifier is the empty string, it will be +// skipped. +// See [Pseudonymization](/dlp/docs/pseudonymization) for example usage. +type GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig struct { + // Possible values: + // "FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED" + // "NUMERIC" - [0-9] (radix of 10) + // "HEXADECIMAL" - [0-9A-F] (radix of 16) + // "UPPER_CASE_ALPHA_NUMERIC" - [0-9A-Z] (radix of 36) + // "ALPHA_NUMERIC" - [0-9A-Za-z] (radix of 62) + CommonAlphabet string `json:"commonAlphabet,omitempty"` + + // Context: The 'tweak', a context may be used for higher security since + // the same + // identifier in two different contexts won't be given the same + // surrogate. If + // the context is not set, a default tweak will be used. + // + // If the context is set but: + // + // 1. there is no record present when transforming a given value or + // 1. the field is not present when transforming a given value, + // + // a default tweak will be used. + // + // Note that case (1) is expected when an `InfoTypeTransformation` + // is + // applied to both structured and non-structured + // `ContentItem`s. + // Currently, the referenced field may be of value type integer or + // string. 
+ // + // The tweak is constructed as a sequence of bytes in big endian byte + // order + // such that: + // + // - a 64 bit integer is encoded followed by a single byte of value 1 + // - a string is encoded in UTF-8 format followed by a single byte of + // value + // å 2 + Context *GooglePrivacyDlpV2FieldId `json:"context,omitempty"` + + // CryptoKey: The key used by the encryption algorithm. [required] + CryptoKey *GooglePrivacyDlpV2CryptoKey `json:"cryptoKey,omitempty"` + + // CustomAlphabet: This is supported by mapping these to the + // alphanumeric characters + // that the FFX mode natively supports. This happens + // before/after + // encryption/decryption. + // Each character listed must appear only once. + // Number of characters must be in the range [2, 62]. + // This must be encoded as ASCII. + // The order of characters does not matter. + CustomAlphabet string `json:"customAlphabet,omitempty"` + + // Radix: The native way to select the alphabet. Must be in the range + // [2, 62]. + Radix int64 `json:"radix,omitempty"` + + // SurrogateInfoType: The custom infoType to annotate the surrogate + // with. + // This annotation will be applied to the surrogate by prefixing it + // with + // the name of the custom infoType followed by the number of + // characters comprising the surrogate. The following scheme defines + // the + // format: info_type_name(surrogate_character_count):surrogate + // + // For example, if the name of custom infoType is 'MY_TOKEN_INFO_TYPE' + // and + // the surrogate is 'abc', the full replacement value + // will be: 'MY_TOKEN_INFO_TYPE(3):abc' + // + // This annotation identifies the surrogate when inspecting content + // using the + // custom + // infoType + // [`SurrogateType`](/dlp/docs/reference/rest/v2/InspectConfig#s + // urrogatetype). + // This facilitates reversal of the surrogate when it occurs in free + // text. 
+ // + // In order for inspection to work properly, the name of this infoType + // must + // not occur naturally anywhere in your data; otherwise, inspection + // may + // find a surrogate that does not correspond to an actual + // identifier. + // Therefore, choose your custom infoType name carefully after + // considering + // what your data looks like. One way to select a name that has a high + // chance + // of yielding reliable detection is to include one or more unicode + // characters + // that are highly improbable to exist in your data. + // For example, assuming your data is entered from a regular ASCII + // keyboard, + // the symbol with the hex code point 29DD might be used like + // so: + // ⧝MY_TOKEN_TYPE + SurrogateInfoType *GooglePrivacyDlpV2InfoType `json:"surrogateInfoType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CommonAlphabet") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CommonAlphabet") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2CustomInfoType: Custom information type provided by +// the user. Used to find domain-specific +// sensitive information configurable to the data in question. +type GooglePrivacyDlpV2CustomInfoType struct { + // DetectionRules: Set of detection rules to apply to all findings of + // this custom info type. + // Rules are applied in order that they are specified. Not supported for + // the + // `surrogate_type` custom info type. + DetectionRules []*GooglePrivacyDlpV2DetectionRule `json:"detectionRules,omitempty"` + + // Dictionary: Dictionary-based custom info type. + Dictionary *GooglePrivacyDlpV2Dictionary `json:"dictionary,omitempty"` + + // InfoType: Info type configuration. All custom info types must have + // configurations + // that do not conflict with built-in info types or other custom info + // types. + InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // Likelihood: Likelihood to return for this custom info type. This base + // value can be + // altered by a detection rule if the finding meets the criteria + // specified by + // the rule. Defaults to `VERY_LIKELY` if not specified. + // + // Possible values: + // "LIKELIHOOD_UNSPECIFIED" - Default value; information with all + // likelihoods is included. + // "VERY_UNLIKELY" - Few matching elements. + // "UNLIKELY" + // "POSSIBLE" - Some matching elements. + // "LIKELY" + // "VERY_LIKELY" - Many matching elements. + Likelihood string `json:"likelihood,omitempty"` + + // Regex: Regex-based custom info type. + Regex *GooglePrivacyDlpV2Regex `json:"regex,omitempty"` + + // SurrogateType: Surrogate info type. 
+ SurrogateType *GooglePrivacyDlpV2SurrogateType `json:"surrogateType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DetectionRules") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DetectionRules") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2CustomInfoType) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2CustomInfoType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DatastoreKey: Record key for a finding in Cloud +// Datastore. +type GooglePrivacyDlpV2DatastoreKey struct { + // EntityKey: Datastore entity key. + EntityKey *GooglePrivacyDlpV2Key `json:"entityKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"EntityKey") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DatastoreKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DatastoreKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DatastoreOptions: Options defining a data set +// within Google Cloud Datastore. +type GooglePrivacyDlpV2DatastoreOptions struct { + // Kind: The kind to process. + Kind *GooglePrivacyDlpV2KindExpression `json:"kind,omitempty"` + + // PartitionId: A partition ID identifies a grouping of entities. The + // grouping is always + // by project and namespace, however the namespace ID may be empty. + PartitionId *GooglePrivacyDlpV2PartitionId `json:"partitionId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DatastoreOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DatastoreOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DateShiftConfig: Shifts dates by random number of +// days, with option to be consistent for the +// same context. +type GooglePrivacyDlpV2DateShiftConfig struct { + // Context: Points to the field that contains the context, for example, + // an entity id. + // If set, must also set method. If set, shift will be consistent for + // the + // given context. + Context *GooglePrivacyDlpV2FieldId `json:"context,omitempty"` + + // CryptoKey: Causes the shift to be computed based on this key and the + // context. This + // results in the same shift for the same context and crypto_key. + CryptoKey *GooglePrivacyDlpV2CryptoKey `json:"cryptoKey,omitempty"` + + // LowerBoundDays: For example, -5 means shift date to at most 5 days + // back in the past. + // [Required] + LowerBoundDays int64 `json:"lowerBoundDays,omitempty"` + + // UpperBoundDays: Range of shift in days. Actual shift will be selected + // at random within this + // range (inclusive ends). Negative means shift to earlier in time. Must + // not + // be more than 365250 days (1000 years) each direction. + // + // For example, 3 means shift date to at most 3 days into the + // future. + // [Required] + UpperBoundDays int64 `json:"upperBoundDays,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Context") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Context") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DateShiftConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DateShiftConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DateTime: Message for a date time object. +type GooglePrivacyDlpV2DateTime struct { + // Date: One or more of the following must be set. All fields are + // optional, but + // when set must be valid date or time values. + Date *GoogleTypeDate `json:"date,omitempty"` + + // Possible values: + // "DAY_OF_WEEK_UNSPECIFIED" - The unspecified day-of-week. + // "MONDAY" - The day-of-week of Monday. + // "TUESDAY" - The day-of-week of Tuesday. + // "WEDNESDAY" - The day-of-week of Wednesday. + // "THURSDAY" - The day-of-week of Thursday. + // "FRIDAY" - The day-of-week of Friday. + // "SATURDAY" - The day-of-week of Saturday. + // "SUNDAY" - The day-of-week of Sunday. + DayOfWeek string `json:"dayOfWeek,omitempty"` + + Time *GoogleTypeTimeOfDay `json:"time,omitempty"` + + TimeZone *GooglePrivacyDlpV2TimeZone `json:"timeZone,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Date") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Date") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DateTime) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DateTime + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DeidentifyConfig: The configuration that controls +// how the data will change. +type GooglePrivacyDlpV2DeidentifyConfig struct { + // InfoTypeTransformations: Treat the dataset as free-form text and + // apply the same free text + // transformation everywhere. + InfoTypeTransformations *GooglePrivacyDlpV2InfoTypeTransformations `json:"infoTypeTransformations,omitempty"` + + // RecordTransformations: Treat the dataset as structured. + // Transformations can be applied to + // specific locations within structured datasets, such as transforming + // a column within a table. + RecordTransformations *GooglePrivacyDlpV2RecordTransformations `json:"recordTransformations,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InfoTypeTransformations") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"InfoTypeTransformations") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DeidentifyConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DeidentifyConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DeidentifyContentRequest: Request to de-identify a +// list of items. +type GooglePrivacyDlpV2DeidentifyContentRequest struct { + // DeidentifyConfig: Configuration for the de-identification of the + // content item. + // Items specified here will override the template referenced by + // the + // deidentify_template_name argument. + DeidentifyConfig *GooglePrivacyDlpV2DeidentifyConfig `json:"deidentifyConfig,omitempty"` + + // DeidentifyTemplateName: Optional template to use. Any configuration + // directly specified in + // deidentify_config will override those set in the template. Singular + // fields + // that are set in this request will replace their corresponding fields + // in the + // template. Repeated fields are appended. Singular sub-messages and + // groups + // are recursively merged. + DeidentifyTemplateName string `json:"deidentifyTemplateName,omitempty"` + + // InspectConfig: Configuration for the inspector. + // Items specified here will override the template referenced by + // the + // inspect_template_name argument. + InspectConfig *GooglePrivacyDlpV2InspectConfig `json:"inspectConfig,omitempty"` + + // InspectTemplateName: Optional template to use. Any configuration + // directly specified in + // inspect_config will override those set in the template. 
Singular + // fields + // that are set in this request will replace their corresponding fields + // in the + // template. Repeated fields are appended. Singular sub-messages and + // groups + // are recursively merged. + InspectTemplateName string `json:"inspectTemplateName,omitempty"` + + // Item: The item to de-identify. Will be treated as text. + Item *GooglePrivacyDlpV2ContentItem `json:"item,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeidentifyConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeidentifyConfig") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DeidentifyContentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DeidentifyContentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DeidentifyContentResponse: Results of +// de-identifying a ContentItem. +type GooglePrivacyDlpV2DeidentifyContentResponse struct { + // Item: The de-identified item. + Item *GooglePrivacyDlpV2ContentItem `json:"item,omitempty"` + + // Overview: An overview of the changes that were made on the `item`. 
+ Overview *GooglePrivacyDlpV2TransformationOverview `json:"overview,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Item") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Item") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DeidentifyContentResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DeidentifyContentResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DeidentifyTemplate: The DeidentifyTemplates +// contains instructions on how to deidentify content. +type GooglePrivacyDlpV2DeidentifyTemplate struct { + // CreateTime: The creation timestamp of a inspectTemplate, output only + // field. + CreateTime string `json:"createTime,omitempty"` + + // DeidentifyConfig: ///////////// // The core content of the template + // // /////////////// + DeidentifyConfig *GooglePrivacyDlpV2DeidentifyConfig `json:"deidentifyConfig,omitempty"` + + // Description: Short description (max 256 chars). 
+ Description string `json:"description,omitempty"` + + // DisplayName: Display name (max 256 chars). + DisplayName string `json:"displayName,omitempty"` + + // Name: The template name. Output only. + // + // The template will have one of the following + // formats: + // `projects/PROJECT_ID/deidentifyTemplates/TEMPLATE_ID` + // OR + // `organizations/ORGANIZATION_ID/deidentifyTemplates/TEMPLATE_ID` + Name string `json:"name,omitempty"` + + // UpdateTime: The last update timestamp of a inspectTemplate, output + // only field. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DeidentifyTemplate) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DeidentifyTemplate + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DetectionRule: Rule for modifying a custom info +// type to alter behavior under certain +// circumstances, depending on the specific details of the rule. Not +// supported +// for the `surrogate_type` custom info type. +type GooglePrivacyDlpV2DetectionRule struct { + // HotwordRule: Hotword-based detection rule. + HotwordRule *GooglePrivacyDlpV2HotwordRule `json:"hotwordRule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HotwordRule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HotwordRule") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DetectionRule) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DetectionRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Dictionary: Custom information type based on a +// dictionary of words or phrases. 
This can +// be used to match sensitive information specific to the data, such as +// a list +// of employee IDs or job titles. +// +// Dictionary words are case-insensitive and all characters other than +// letters +// and digits in the unicode [Basic +// Multilingual +// Plane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29# +// Basic_Multilingual_Plane) +// will be replaced with whitespace when scanning for matches, so +// the +// dictionary phrase "Sam Johnson" will match all three phrases "sam +// johnson", +// "Sam, Johnson", and "Sam (Johnson)". Additionally, the +// characters +// surrounding any match must be of a different type than the +// adjacent +// characters within the word, so letters must be next to non-letters +// and +// digits next to non-digits. For example, the dictionary word "jen" +// will +// match the first three letters of the text "jen123" but will return +// no +// matches for "jennifer". +// +// Dictionary words containing a large number of characters that are +// not +// letters or digits may result in unexpected findings because such +// characters +// are treated as whitespace. +type GooglePrivacyDlpV2Dictionary struct { + // WordList: List of words or phrases to search for. + WordList *GooglePrivacyDlpV2WordList `json:"wordList,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WordList") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WordList") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Dictionary) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Dictionary + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DlpJob: Combines all of the information about a DLP +// job. +type GooglePrivacyDlpV2DlpJob struct { + // CreateTime: Time when the job was created. + CreateTime string `json:"createTime,omitempty"` + + // EndTime: Time when the job finished. + EndTime string `json:"endTime,omitempty"` + + // Errors: A stream of errors encountered running the job. + Errors []*GooglePrivacyDlpV2Error `json:"errors,omitempty"` + + // InspectDetails: Results from inspecting a data source. + InspectDetails *GooglePrivacyDlpV2InspectDataSourceDetails `json:"inspectDetails,omitempty"` + + // JobTriggerName: If created by a job trigger, the resource name of the + // trigger that + // instantiated the job. + JobTriggerName string `json:"jobTriggerName,omitempty"` + + // Name: The server-assigned name. + Name string `json:"name,omitempty"` + + // RiskDetails: Results from analyzing risk of a data source. + RiskDetails *GooglePrivacyDlpV2AnalyzeDataSourceRiskDetails `json:"riskDetails,omitempty"` + + // StartTime: Time when the job started. + StartTime string `json:"startTime,omitempty"` + + // State: State of a job. + // + // Possible values: + // "JOB_STATE_UNSPECIFIED" + // "PENDING" - The job has not yet started. + // "RUNNING" - The job is currently running. + // "DONE" - The job is no longer running. + // "CANCELED" - The job was canceled before it could complete. + // "FAILED" - The job had an error and did not complete. 
+ State string `json:"state,omitempty"` + + // Type: The type of job. + // + // Possible values: + // "DLP_JOB_TYPE_UNSPECIFIED" + // "INSPECT_JOB" - The job inspected Google Cloud for sensitive data. + // "RISK_ANALYSIS_JOB" - The job executed a Risk Analysis computation. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DlpJob) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DlpJob + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2DocumentLocation: Location of a finding within a +// document. +type GooglePrivacyDlpV2DocumentLocation struct { + // FileOffset: Offset of the line, from the beginning of the file, where + // the finding + // is located. + FileOffset int64 `json:"fileOffset,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "FileOffset") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FileOffset") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2DocumentLocation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2DocumentLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Error: Details information about an error +// encountered during job execution or +// the results of an unsuccessful activation of the JobTrigger. +// Output only field. +type GooglePrivacyDlpV2Error struct { + Details *GoogleRpcStatus `json:"details,omitempty"` + + // Timestamps: The times the error occurred. + Timestamps []string `json:"timestamps,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Details") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Details") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Error) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Error + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Expressions: An expression, consisting or an +// operator and conditions. +type GooglePrivacyDlpV2Expressions struct { + Conditions *GooglePrivacyDlpV2Conditions `json:"conditions,omitempty"` + + // LogicalOperator: The operator to apply to the result of conditions. + // Default and currently + // only supported value is `AND`. + // + // Possible values: + // "LOGICAL_OPERATOR_UNSPECIFIED" + // "AND" + LogicalOperator string `json:"logicalOperator,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Conditions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Conditions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Expressions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Expressions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2FieldId: General identifier of a data field in a +// storage service. +type GooglePrivacyDlpV2FieldId struct { + // Name: Name describing the field. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2FieldId) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2FieldId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2FieldTransformation: The transformation to apply to +// the field. +type GooglePrivacyDlpV2FieldTransformation struct { + // Condition: Only apply the transformation if the condition evaluates + // to true for the + // given `RecordCondition`. The conditions are allowed to reference + // fields + // that are not used in the actual transformation. 
[optional] + // + // Example Use Cases: + // + // - Apply a different bucket transformation to an age column if the zip + // code + // column for the same record is within a specific range. + // - Redact a field if the date of birth field is greater than 85. + Condition *GooglePrivacyDlpV2RecordCondition `json:"condition,omitempty"` + + // Fields: Input field(s) to apply the transformation to. [required] + Fields []*GooglePrivacyDlpV2FieldId `json:"fields,omitempty"` + + // InfoTypeTransformations: Treat the contents of the field as free + // text, and selectively + // transform content that matches an `InfoType`. + InfoTypeTransformations *GooglePrivacyDlpV2InfoTypeTransformations `json:"infoTypeTransformations,omitempty"` + + // PrimitiveTransformation: Apply the transformation to the entire + // field. + PrimitiveTransformation *GooglePrivacyDlpV2PrimitiveTransformation `json:"primitiveTransformation,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Condition") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2FieldTransformation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2FieldTransformation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2FileSet: Set of files to scan. +type GooglePrivacyDlpV2FileSet struct { + // Url: The url, in the format `gs:///`. Trailing wildcard + // in the + // path is allowed. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Url") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Url") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2FileSet) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2FileSet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Finding: Represents a piece of potentially +// sensitive content. +type GooglePrivacyDlpV2Finding struct { + // CreateTime: Timestamp when finding was detected. + CreateTime string `json:"createTime,omitempty"` + + // InfoType: The type of content that might have been found. + // Provided if requested by the `InspectConfig`. 
+ InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // Likelihood: Estimate of how likely it is that the `info_type` is + // correct. + // + // Possible values: + // "LIKELIHOOD_UNSPECIFIED" - Default value; information with all + // likelihoods is included. + // "VERY_UNLIKELY" - Few matching elements. + // "UNLIKELY" + // "POSSIBLE" - Some matching elements. + // "LIKELY" + // "VERY_LIKELY" - Many matching elements. + Likelihood string `json:"likelihood,omitempty"` + + // Location: Where the content was found. + Location *GooglePrivacyDlpV2Location `json:"location,omitempty"` + + // Quote: The content that was found. Even if the content is not + // textual, it + // may be converted to a textual representation here. + // Provided if requested by the `InspectConfig` and the finding is + // less than or equal to 4096 bytes long. If the finding exceeds 4096 + // bytes + // in length, the quote may be omitted. + Quote string `json:"quote,omitempty"` + + // QuoteInfo: Contains data parsed from quotes. Only populated if + // include_quote was set + // to true and a supported infoType was requested. Currently + // supported + // infoTypes: DATE, DATE_OF_BIRTH and TIME. + QuoteInfo *GooglePrivacyDlpV2QuoteInfo `json:"quoteInfo,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Finding) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Finding + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2FindingLimits struct { + // MaxFindingsPerInfoType: Configuration of findings limit given for + // specified infoTypes. + MaxFindingsPerInfoType []*GooglePrivacyDlpV2InfoTypeLimit `json:"maxFindingsPerInfoType,omitempty"` + + // MaxFindingsPerItem: Max number of findings that will be returned for + // each item scanned. + // When set within `InspectDataSourceRequest`, + // the maximum returned is 1000 regardless if this is set higher. + // When set within `InspectContentRequest`, this field is ignored. + MaxFindingsPerItem int64 `json:"maxFindingsPerItem,omitempty"` + + // MaxFindingsPerRequest: Max number of findings that will be returned + // per request/job. + // When set within `InspectContentRequest`, the maximum returned is + // 1000 + // regardless if this is set higher. + MaxFindingsPerRequest int64 `json:"maxFindingsPerRequest,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "MaxFindingsPerInfoType") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxFindingsPerInfoType") + // to include in API requests with the JSON null value. 
By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2FindingLimits) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2FindingLimits + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2FixedSizeBucketingConfig: Buckets values based on +// fixed size ranges. The +// Bucketing transformation can provide all of this functionality, +// but requires more configuration. This message is provided as a +// convenience to +// the user for simple bucketing strategies. +// +// The transformed value will be a hyphenated string +// of +// -, i.e if lower_bound = 10 and upper_bound +// = 20 +// all values that are within this bucket will be replaced with +// "10-20". +// +// This can be used on data of type: double, long. +// +// If the bound Value type differs from the type of data +// being transformed, we will first attempt converting the type of the +// data to +// be transformed to match the type of the bound before comparing. +type GooglePrivacyDlpV2FixedSizeBucketingConfig struct { + // BucketSize: Size of each bucket (except for minimum and maximum + // buckets). So if + // `lower_bound` = 10, `upper_bound` = 89, and `bucket_size` = 10, then + // the + // following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, + // 50-60, + // 60-70, 70-80, 80-89, 89+. Precision up to 2 decimals works. + // [Required]. + BucketSize float64 `json:"bucketSize,omitempty"` + + // LowerBound: Lower bound value of buckets. 
All values less than + // `lower_bound` are + // grouped together into a single bucket; for example if `lower_bound` = + // 10, + // then all values less than 10 are replaced with the value “-10”. + // [Required]. + LowerBound *GooglePrivacyDlpV2Value `json:"lowerBound,omitempty"` + + // UpperBound: Upper bound value of buckets. All values greater than + // upper_bound are + // grouped together into a single bucket; for example if `upper_bound` = + // 89, + // then all values greater than 89 are replaced with the value + // “89+”. + // [Required]. + UpperBound *GooglePrivacyDlpV2Value `json:"upperBound,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2FixedSizeBucketingConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2FixedSizeBucketingConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GooglePrivacyDlpV2FixedSizeBucketingConfig) UnmarshalJSON(data []byte) error { + type NoMethod GooglePrivacyDlpV2FixedSizeBucketingConfig + var s1 struct { + BucketSize gensupport.JSONFloat64 `json:"bucketSize"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.BucketSize = float64(s1.BucketSize) + return nil +} + +// GooglePrivacyDlpV2HotwordRule: Detection rule that adjusts the +// likelihood of findings within a certain +// proximity of hotwords. +type GooglePrivacyDlpV2HotwordRule struct { + // HotwordRegex: Regex pattern defining what qualifies as a hotword. + HotwordRegex *GooglePrivacyDlpV2Regex `json:"hotwordRegex,omitempty"` + + // LikelihoodAdjustment: Likelihood adjustment to apply to all matching + // findings. + LikelihoodAdjustment *GooglePrivacyDlpV2LikelihoodAdjustment `json:"likelihoodAdjustment,omitempty"` + + // Proximity: Proximity of the finding within which the entire hotword + // must reside. + // The total length of the window cannot exceed 1000 characters. Note + // that + // the finding itself will be included in the window, so that hotwords + // may + // be used to match substrings of the finding itself. For example, + // the + // certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could + // be + // adjusted upwards if the area code is known to be the local area code + // of + // a company office using the hotword regex "\(xxx\)", where "xxx" + // is the area code in question. + Proximity *GooglePrivacyDlpV2Proximity `json:"proximity,omitempty"` + + // ForceSendFields is a list of field names (e.g. "HotwordRegex") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "HotwordRegex") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2HotwordRule) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2HotwordRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ImageLocation: Location of the finding within an +// image. +type GooglePrivacyDlpV2ImageLocation struct { + // BoundingBoxes: Bounding boxes locating the pixels within the image + // containing the finding. + BoundingBoxes []*GooglePrivacyDlpV2BoundingBox `json:"boundingBoxes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BoundingBoxes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BoundingBoxes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ImageLocation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ImageLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ImageRedactionConfig: Configuration for determining +// how redaction of images should occur. +type GooglePrivacyDlpV2ImageRedactionConfig struct { + // InfoType: Only one per info_type should be provided per request. If + // not + // specified, and redact_all_text is false, the DLP API will redact + // all + // text that it matches against all info_types that are found, but + // not + // specified in another ImageRedactionConfig. + InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // RedactAllText: If true, all text found in the image, regardless + // whether it matches an + // info_type, is redacted. + RedactAllText bool `json:"redactAllText,omitempty"` + + // RedactionColor: The color to use when redacting content from an + // image. If not specified, + // the default is black. + RedactionColor *GooglePrivacyDlpV2Color `json:"redactionColor,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InfoType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InfoType") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ImageRedactionConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ImageRedactionConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InfoType: Type of information detected by the API. +type GooglePrivacyDlpV2InfoType struct { + // Name: Name of the information type. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InfoType) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InfoType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InfoTypeDescription: InfoType description. 
+type GooglePrivacyDlpV2InfoTypeDescription struct { + // DisplayName: Human readable form of the infoType name. + DisplayName string `json:"displayName,omitempty"` + + // Name: Internal name of the infoType. + Name string `json:"name,omitempty"` + + // SupportedBy: Which parts of the API supports this InfoType. + // + // Possible values: + // "ENUM_TYPE_UNSPECIFIED" + // "INSPECT" - Supported by the inspect operations. + // "RISK_ANALYSIS" - Supported by the risk analysis operations. + SupportedBy []string `json:"supportedBy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InfoTypeDescription) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InfoTypeDescription + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InfoTypeLimit: Max findings configuration per +// infoType, per content item or long +// running DlpJob. +type GooglePrivacyDlpV2InfoTypeLimit struct { + // InfoType: Type of information the findings limit applies to. Only one + // limit per + // info_type should be provided. 
If InfoTypeLimit does not have + // an + // info_type, the DLP API applies the limit against all info_types + // that + // are found but not specified in another InfoTypeLimit. + InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // MaxFindings: Max findings limit for the given infoType. + MaxFindings int64 `json:"maxFindings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InfoType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InfoType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InfoTypeLimit) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InfoTypeLimit + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InfoTypeStats: Statistics regarding a specific +// InfoType. +type GooglePrivacyDlpV2InfoTypeStats struct { + // Count: Number of findings for this infoType. + Count int64 `json:"count,omitempty,string"` + + // InfoType: The type of finding this stat is for. + InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InfoTypeStats) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InfoTypeStats + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InfoTypeTransformation: A transformation to apply +// to text that is identified as a specific +// info_type. +type GooglePrivacyDlpV2InfoTypeTransformation struct { + // InfoTypes: InfoTypes to apply the transformation to. Empty list will + // match all + // available infoTypes for this transformation. + InfoTypes []*GooglePrivacyDlpV2InfoType `json:"infoTypes,omitempty"` + + // PrimitiveTransformation: Primitive transformation to apply to the + // infoType. [required] + PrimitiveTransformation *GooglePrivacyDlpV2PrimitiveTransformation `json:"primitiveTransformation,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InfoTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InfoTypes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InfoTypeTransformation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InfoTypeTransformation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InfoTypeTransformations: A type of transformation +// that will scan unstructured text and +// apply various `PrimitiveTransformation`s to each finding, where +// the +// transformation is applied to only values that were identified as a +// specific +// info_type. +type GooglePrivacyDlpV2InfoTypeTransformations struct { + // Transformations: Transformation for each infoType. Cannot specify + // more than one + // for a given infoType. [required] + Transformations []*GooglePrivacyDlpV2InfoTypeTransformation `json:"transformations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Transformations") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Transformations") to + // include in API requests with the JSON null value. 
By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InfoTypeTransformations) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InfoTypeTransformations + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InspectConfig: Configuration description of the +// scanning process. +// When used with redactContent only info_types and min_likelihood are +// currently +// used. +type GooglePrivacyDlpV2InspectConfig struct { + // ContentOptions: List of options defining data content to scan. + // If empty, text, images, and other content will be included. + // + // Possible values: + // "CONTENT_UNSPECIFIED" - Includes entire content of a file or a data + // stream. + // "CONTENT_TEXT" - Text content within the data, excluding any + // metadata. + // "CONTENT_IMAGE" - Images found in the data. + ContentOptions []string `json:"contentOptions,omitempty"` + + // CustomInfoTypes: Custom infoTypes provided by the user. + CustomInfoTypes []*GooglePrivacyDlpV2CustomInfoType `json:"customInfoTypes,omitempty"` + + // ExcludeInfoTypes: When true, excludes type information of the + // findings. + ExcludeInfoTypes bool `json:"excludeInfoTypes,omitempty"` + + // IncludeQuote: When true, a contextual quote from the data that + // triggered a finding is + // included in the response; see Finding.quote. + IncludeQuote bool `json:"includeQuote,omitempty"` + + // InfoTypes: Restricts what info_types to look for. The values must + // correspond to + // InfoType values returned by ListInfoTypes or found in + // documentation. + // Empty info_types runs all enabled detectors. 
+ InfoTypes []*GooglePrivacyDlpV2InfoType `json:"infoTypes,omitempty"` + + Limits *GooglePrivacyDlpV2FindingLimits `json:"limits,omitempty"` + + // MinLikelihood: Only returns findings equal or above this threshold. + // The default is + // POSSIBLE. + // + // Possible values: + // "LIKELIHOOD_UNSPECIFIED" - Default value; information with all + // likelihoods is included. + // "VERY_UNLIKELY" - Few matching elements. + // "UNLIKELY" + // "POSSIBLE" - Some matching elements. + // "LIKELY" + // "VERY_LIKELY" - Many matching elements. + MinLikelihood string `json:"minLikelihood,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ContentOptions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ContentOptions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InspectContentRequest: Request to search for +// potentially sensitive info in a ContentItem. +type GooglePrivacyDlpV2InspectContentRequest struct { + // InspectConfig: Configuration for the inspector. 
What specified here + // will override + // the template referenced by the inspect_template_name argument. + InspectConfig *GooglePrivacyDlpV2InspectConfig `json:"inspectConfig,omitempty"` + + // InspectTemplateName: Optional template to use. Any configuration + // directly specified in + // inspect_config will override those set in the template. Singular + // fields + // that are set in this request will replace their corresponding fields + // in the + // template. Repeated fields are appended. Singular sub-messages and + // groups + // are recursively merged. + InspectTemplateName string `json:"inspectTemplateName,omitempty"` + + // Item: The item to inspect. + Item *GooglePrivacyDlpV2ContentItem `json:"item,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InspectConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InspectConfig") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectContentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectContentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InspectContentResponse: Results of inspecting an +// item. 
+type GooglePrivacyDlpV2InspectContentResponse struct { + // Result: The findings. + Result *GooglePrivacyDlpV2InspectResult `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectContentResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectContentResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InspectDataSourceDetails: The results of an inspect +// DataSource job. +type GooglePrivacyDlpV2InspectDataSourceDetails struct { + // RequestedOptions: The configuration used for this job. + RequestedOptions *GooglePrivacyDlpV2RequestedOptions `json:"requestedOptions,omitempty"` + + // Result: A summary of the outcome of this inspect job. + Result *GooglePrivacyDlpV2Result `json:"result,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequestedOptions") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestedOptions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectDataSourceDetails) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectDataSourceDetails + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2InspectJobConfig struct { + // Actions: Actions to execute at the completion of the job. Are + // executed in the order + // provided. + Actions []*GooglePrivacyDlpV2Action `json:"actions,omitempty"` + + // InspectConfig: How and what to scan for. + InspectConfig *GooglePrivacyDlpV2InspectConfig `json:"inspectConfig,omitempty"` + + // InspectTemplateName: If provided, will be used as the default for all + // values in InspectConfig. + // `inspect_config` will be merged into the values persisted as part of + // the + // template. + InspectTemplateName string `json:"inspectTemplateName,omitempty"` + + // StorageConfig: The data to scan. + StorageConfig *GooglePrivacyDlpV2StorageConfig `json:"storageConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Actions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Actions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectJobConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectJobConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InspectResult: All the findings for a single +// scanned item. +type GooglePrivacyDlpV2InspectResult struct { + // Findings: List of findings for an item. + Findings []*GooglePrivacyDlpV2Finding `json:"findings,omitempty"` + + // FindingsTruncated: If true, then this item might have more findings + // than were returned, + // and the findings returned are an arbitrary subset of all + // findings. + // The findings list might be truncated because the input items were + // too + // large, or because the server reached the maximum amount of + // resources + // allowed for a single API call. For best results, divide the input + // into + // smaller batches. + FindingsTruncated bool `json:"findingsTruncated,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Findings") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Findings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2InspectTemplate: The inspectTemplate contains a +// configuration (set of types of sensitive data +// to be detected) to be used anywhere you otherwise would normally +// specify +// InspectConfig. +type GooglePrivacyDlpV2InspectTemplate struct { + // CreateTime: The creation timestamp of a inspectTemplate, output only + // field. + CreateTime string `json:"createTime,omitempty"` + + // Description: Short description (max 256 chars). + Description string `json:"description,omitempty"` + + // DisplayName: Display name (max 256 chars). + DisplayName string `json:"displayName,omitempty"` + + // InspectConfig: The core content of the template. Configuration of the + // scanning process. + InspectConfig *GooglePrivacyDlpV2InspectConfig `json:"inspectConfig,omitempty"` + + // Name: The template name. Output only. 
+ // + // The template will have one of the following + // formats: + // `projects/PROJECT_ID/inspectTemplates/TEMPLATE_ID` + // OR + // `organizations/ORGANIZATION_ID/inspectTemplates/TEMPLATE_ID` + Name string `json:"name,omitempty"` + + // UpdateTime: The last update timestamp of a inspectTemplate, output + // only field. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2InspectTemplate) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2InspectTemplate + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2JobTrigger: Contains a configuration to make dlp +// api calls on a repeating basis. +type GooglePrivacyDlpV2JobTrigger struct { + // CreateTime: The creation timestamp of a triggeredJob, output only + // field. 
+ CreateTime string `json:"createTime,omitempty"` + + // Description: User provided description (max 256 chars) + Description string `json:"description,omitempty"` + + // DisplayName: Display name (max 100 chars) + DisplayName string `json:"displayName,omitempty"` + + // Errors: A stream of errors encountered when the trigger was + // activated. Repeated + // errors may result in the JobTrigger automaticaly being paused. + // Will return the last 100 errors. Whenever the JobTrigger is + // modified + // this list will be cleared. Output only field. + Errors []*GooglePrivacyDlpV2Error `json:"errors,omitempty"` + + InspectJob *GooglePrivacyDlpV2InspectJobConfig `json:"inspectJob,omitempty"` + + // LastRunTime: The timestamp of the last time this trigger executed, + // output only field. + LastRunTime string `json:"lastRunTime,omitempty"` + + // Name: Unique resource name for the triggeredJob, assigned by the + // service when the + // triggeredJob is created, for + // example + // `projects/dlp-test-project/triggeredJobs/53234423`. + Name string `json:"name,omitempty"` + + // Status: A status for this trigger. [required] + // + // Possible values: + // "STATUS_UNSPECIFIED" + // "HEALTHY" - Trigger is healthy. + // "PAUSED" - Trigger is temporarily paused. + // "CANCELLED" - Trigger is cancelled and can not be resumed. + Status string `json:"status,omitempty"` + + // Triggers: A list of triggers which will be OR'ed together. Only one + // in the list + // needs to trigger for a job to be started. The list may contain only + // a single Schedule trigger and must have at least one object. + Triggers []*GooglePrivacyDlpV2Trigger `json:"triggers,omitempty"` + + // UpdateTime: The last update timestamp of a triggeredJob, output only + // field. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2JobTrigger) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2JobTrigger + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KAnonymityConfig: k-anonymity metric, used for +// analysis of reidentification risk. +type GooglePrivacyDlpV2KAnonymityConfig struct { + // QuasiIds: Set of fields to compute k-anonymity over. When multiple + // fields are + // specified, they are considered a single composite key. Structs + // and + // repeated data types are not supported; however, nested fields + // are + // supported so long as they are not structs themselves or nested + // within + // a repeated field. + QuasiIds []*GooglePrivacyDlpV2FieldId `json:"quasiIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuasiIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuasiIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KAnonymityConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KAnonymityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KAnonymityEquivalenceClass: The set of columns' +// values that share the same ldiversity value +type GooglePrivacyDlpV2KAnonymityEquivalenceClass struct { + // EquivalenceClassSize: Size of the equivalence class, for example + // number of rows with the + // above set of values. + EquivalenceClassSize int64 `json:"equivalenceClassSize,omitempty,string"` + + // QuasiIdsValues: Set of values defining the equivalence class. One + // value per + // quasi-identifier column in the original KAnonymity metric + // message. + // The order is always the same as the original request. + QuasiIdsValues []*GooglePrivacyDlpV2Value `json:"quasiIdsValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EquivalenceClassSize") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"EquivalenceClassSize") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KAnonymityEquivalenceClass) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KAnonymityEquivalenceClass + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2KAnonymityHistogramBucket struct { + // BucketSize: Total number of equivalence classes in this bucket. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValueCount: Total number of distinct equivalence classes in + // this bucket. + BucketValueCount int64 `json:"bucketValueCount,omitempty,string"` + + // BucketValues: Sample of equivalence classes in this bucket. The total + // number of + // classes returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2KAnonymityEquivalenceClass `json:"bucketValues,omitempty"` + + // EquivalenceClassSizeLowerBound: Lower bound on the size of the + // equivalence classes in this bucket. + EquivalenceClassSizeLowerBound int64 `json:"equivalenceClassSizeLowerBound,omitempty,string"` + + // EquivalenceClassSizeUpperBound: Upper bound on the size of the + // equivalence classes in this bucket. + EquivalenceClassSizeUpperBound int64 `json:"equivalenceClassSizeUpperBound,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KAnonymityHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KAnonymityHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KAnonymityResult: Result of the k-anonymity +// computation. +type GooglePrivacyDlpV2KAnonymityResult struct { + // EquivalenceClassHistogramBuckets: Histogram of k-anonymity + // equivalence classes. + EquivalenceClassHistogramBuckets []*GooglePrivacyDlpV2KAnonymityHistogramBucket `json:"equivalenceClassHistogramBuckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EquivalenceClassHistogramBuckets") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "EquivalenceClassHistogramBuckets") to include in API requests with + // the JSON null value. 
By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KAnonymityResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KAnonymityResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KMapEstimationConfig: Reidentifiability metric. +// This corresponds to a risk model similar to what +// is called "journalist risk" in the literature, except the attack +// dataset is +// statistically modeled instead of being perfectly known. This can be +// done +// using publicly available data (like the US Census), or using a +// custom +// statistical model (indicated as one or several BigQuery tables), or +// by +// extrapolating from the distribution of values in the input dataset. +type GooglePrivacyDlpV2KMapEstimationConfig struct { + // AuxiliaryTables: Several auxiliary tables can be used in the + // analysis. Each custom_tag + // used to tag a quasi-identifiers column must appear in exactly one + // column + // of one auxiliary table. + AuxiliaryTables []*GooglePrivacyDlpV2AuxiliaryTable `json:"auxiliaryTables,omitempty"` + + // QuasiIds: Fields considered to be quasi-identifiers. No two columns + // can have the + // same tag. [required] + QuasiIds []*GooglePrivacyDlpV2TaggedField `json:"quasiIds,omitempty"` + + // RegionCode: ISO 3166-1 alpha-2 region code to use in the statistical + // modeling. + // Required if no column is tagged with a region-specific InfoType + // (like + // US_ZIP_5) or a region code. + RegionCode string `json:"regionCode,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"AuxiliaryTables") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuxiliaryTables") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KMapEstimationConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KMapEstimationConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KMapEstimationHistogramBucket: A +// KMapEstimationHistogramBucket message with the following values: +// min_anonymity: 3 +// max_anonymity: 5 +// frequency: 42 +// means that there are 42 records whose quasi-identifier values +// correspond +// to 3, 4 or 5 people in the overlying population. An important +// particular +// case is when min_anonymity = max_anonymity = 1: the frequency field +// then +// corresponds to the number of uniquely identifiable records. +type GooglePrivacyDlpV2KMapEstimationHistogramBucket struct { + // BucketSize: Number of records within these anonymity bounds. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValueCount: Total number of distinct quasi-identifier tuple + // values in this bucket. 
+ BucketValueCount int64 `json:"bucketValueCount,omitempty,string"` + + // BucketValues: Sample of quasi-identifier tuple values in this bucket. + // The total + // number of classes returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2KMapEstimationQuasiIdValues `json:"bucketValues,omitempty"` + + // MaxAnonymity: Always greater than or equal to min_anonymity. + MaxAnonymity int64 `json:"maxAnonymity,omitempty,string"` + + // MinAnonymity: Always positive. + MinAnonymity int64 `json:"minAnonymity,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KMapEstimationHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KMapEstimationHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KMapEstimationQuasiIdValues: A tuple of values for +// the quasi-identifier columns. +type GooglePrivacyDlpV2KMapEstimationQuasiIdValues struct { + // EstimatedAnonymity: The estimated anonymity for these + // quasi-identifier values. 
+ EstimatedAnonymity int64 `json:"estimatedAnonymity,omitempty,string"` + + // QuasiIdsValues: The quasi-identifier values. + QuasiIdsValues []*GooglePrivacyDlpV2Value `json:"quasiIdsValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EstimatedAnonymity") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EstimatedAnonymity") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KMapEstimationQuasiIdValues) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KMapEstimationQuasiIdValues + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KMapEstimationResult: Result of the +// reidentifiability analysis. Note that these results are +// an +// estimation, not exact values. +type GooglePrivacyDlpV2KMapEstimationResult struct { + // KMapEstimationHistogram: The intervals [min_anonymity, max_anonymity] + // do not overlap. If a value + // doesn't correspond to any such interval, the associated frequency + // is + // zero. 
For example, the following records: + // {min_anonymity: 1, max_anonymity: 1, frequency: 17} + // {min_anonymity: 2, max_anonymity: 3, frequency: 42} + // {min_anonymity: 5, max_anonymity: 10, frequency: 99} + // mean that there are no record with an estimated anonymity of 4, 5, + // or + // larger than 10. + KMapEstimationHistogram []*GooglePrivacyDlpV2KMapEstimationHistogramBucket `json:"kMapEstimationHistogram,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "KMapEstimationHistogram") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KMapEstimationHistogram") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KMapEstimationResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KMapEstimationResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Key: A unique identifier for a Datastore entity. +// If a key's partition ID or any of its path kinds or names +// are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. 
+type GooglePrivacyDlpV2Key struct { + // PartitionId: Entities are partitioned into subsets, currently + // identified by a project + // ID and namespace ID. + // Queries are scoped to a single partition. + PartitionId *GooglePrivacyDlpV2PartitionId `json:"partitionId,omitempty"` + + // Path: The entity path. + // An entity path consists of one or more elements composed of a kind + // and a + // string or numerical identifier, which identify entities. The + // first + // element identifies a _root entity_, the second element identifies + // a _child_ of the root entity, the third element identifies a child of + // the + // second entity, and so forth. The entities identified by all prefixes + // of + // the path are called the element's _ancestors_. + // + // A path can never be empty, and a path can have at most 100 elements. + Path []*GooglePrivacyDlpV2PathElement `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PartitionId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PartitionId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Key) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Key + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KindExpression: A representation of a Datastore +// kind. +type GooglePrivacyDlpV2KindExpression struct { + // Name: The name of the kind. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KindExpression) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KindExpression + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2KmsWrappedCryptoKey: Include to use an existing +// data crypto key wrapped by KMS. +// Authorization requires the following IAM permissions when sending a +// request +// to perform a crypto transformation using a kms-wrapped crypto +// key: +// dlp.kms.encrypt +type GooglePrivacyDlpV2KmsWrappedCryptoKey struct { + // CryptoKeyName: The resource name of the KMS CryptoKey to use for + // unwrapping. 
[required] + CryptoKeyName string `json:"cryptoKeyName,omitempty"` + + // WrappedKey: The wrapped data crypto key. [required] + WrappedKey string `json:"wrappedKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CryptoKeyName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CryptoKeyName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2KmsWrappedCryptoKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2KmsWrappedCryptoKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2LDiversityConfig: l-diversity metric, used for +// analysis of reidentification risk. +type GooglePrivacyDlpV2LDiversityConfig struct { + // QuasiIds: Set of quasi-identifiers indicating how equivalence classes + // are + // defined for the l-diversity computation. When multiple fields + // are + // specified, they are considered a single composite key. + QuasiIds []*GooglePrivacyDlpV2FieldId `json:"quasiIds,omitempty"` + + // SensitiveAttribute: Sensitive field for computing the l-value. + SensitiveAttribute *GooglePrivacyDlpV2FieldId `json:"sensitiveAttribute,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"QuasiIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuasiIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2LDiversityConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2LDiversityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2LDiversityEquivalenceClass: The set of columns' +// values that share the same ldiversity value. +type GooglePrivacyDlpV2LDiversityEquivalenceClass struct { + // EquivalenceClassSize: Size of the k-anonymity equivalence class. + EquivalenceClassSize int64 `json:"equivalenceClassSize,omitempty,string"` + + // NumDistinctSensitiveValues: Number of distinct sensitive values in + // this equivalence class. + NumDistinctSensitiveValues int64 `json:"numDistinctSensitiveValues,omitempty,string"` + + // QuasiIdsValues: Quasi-identifier values defining the k-anonymity + // equivalence + // class. The order is always the same as the original request. + QuasiIdsValues []*GooglePrivacyDlpV2Value `json:"quasiIdsValues,omitempty"` + + // TopSensitiveValues: Estimated frequencies of top sensitive values. 
+ TopSensitiveValues []*GooglePrivacyDlpV2ValueFrequency `json:"topSensitiveValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EquivalenceClassSize") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EquivalenceClassSize") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2LDiversityEquivalenceClass) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2LDiversityEquivalenceClass + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2LDiversityHistogramBucket struct { + // BucketSize: Total number of equivalence classes in this bucket. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValueCount: Total number of distinct equivalence classes in + // this bucket. + BucketValueCount int64 `json:"bucketValueCount,omitempty,string"` + + // BucketValues: Sample of equivalence classes in this bucket. The total + // number of + // classes returned per bucket is capped at 20. 
+ BucketValues []*GooglePrivacyDlpV2LDiversityEquivalenceClass `json:"bucketValues,omitempty"` + + // SensitiveValueFrequencyLowerBound: Lower bound on the sensitive value + // frequencies of the equivalence + // classes in this bucket. + SensitiveValueFrequencyLowerBound int64 `json:"sensitiveValueFrequencyLowerBound,omitempty,string"` + + // SensitiveValueFrequencyUpperBound: Upper bound on the sensitive value + // frequencies of the equivalence + // classes in this bucket. + SensitiveValueFrequencyUpperBound int64 `json:"sensitiveValueFrequencyUpperBound,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2LDiversityHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2LDiversityHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2LDiversityResult: Result of the l-diversity +// computation. 
+type GooglePrivacyDlpV2LDiversityResult struct { + // SensitiveValueFrequencyHistogramBuckets: Histogram of l-diversity + // equivalence class sensitive value frequencies. + SensitiveValueFrequencyHistogramBuckets []*GooglePrivacyDlpV2LDiversityHistogramBucket `json:"sensitiveValueFrequencyHistogramBuckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SensitiveValueFrequencyHistogramBuckets") to unconditionally include + // in API requests. By default, fields with empty values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "SensitiveValueFrequencyHistogramBuckets") to include in API requests + // with the JSON null value. By default, fields with empty values are + // omitted from API requests. However, any field with an empty value + // appearing in NullFields will be sent to the server as null. It is an + // error if a field in this list has a non-empty value. This may be used + // to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2LDiversityResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2LDiversityResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2LikelihoodAdjustment: Message for specifying an +// adjustment to the likelihood of a finding as +// part of a detection rule. +type GooglePrivacyDlpV2LikelihoodAdjustment struct { + // FixedLikelihood: Set the likelihood of a finding to a fixed value. + // + // Possible values: + // "LIKELIHOOD_UNSPECIFIED" - Default value; information with all + // likelihoods is included. + // "VERY_UNLIKELY" - Few matching elements. 
+ // "UNLIKELY" + // "POSSIBLE" - Some matching elements. + // "LIKELY" + // "VERY_LIKELY" - Many matching elements. + FixedLikelihood string `json:"fixedLikelihood,omitempty"` + + // RelativeLikelihood: Increase or decrease the likelihood by the + // specified number of + // levels. For example, if a finding would be `POSSIBLE` without + // the + // detection rule and `relative_likelihood` is 1, then it is upgraded + // to + // `LIKELY`, while a value of -1 would downgrade it to + // `UNLIKELY`. + // Likelihood may never drop below `VERY_UNLIKELY` or + // exceed + // `VERY_LIKELY`, so applying an adjustment of 1 followed by + // an + // adjustment of -1 when base likelihood is `VERY_LIKELY` will result + // in + // a final likelihood of `LIKELY`. + RelativeLikelihood int64 `json:"relativeLikelihood,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FixedLikelihood") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FixedLikelihood") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2LikelihoodAdjustment) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2LikelihoodAdjustment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ListDeidentifyTemplatesResponse: Response message +// for ListDeidentifyTemplates. +type GooglePrivacyDlpV2ListDeidentifyTemplatesResponse struct { + // DeidentifyTemplates: List of deidentify templates, up to page_size + // in + // ListDeidentifyTemplatesRequest. + DeidentifyTemplates []*GooglePrivacyDlpV2DeidentifyTemplate `json:"deidentifyTemplates,omitempty"` + + // NextPageToken: If the next page is available then the next page token + // to be used + // in following ListDeidentifyTemplates request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DeidentifyTemplates") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeidentifyTemplates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ListDeidentifyTemplatesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ListDeidentifyTemplatesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ListDlpJobsResponse: The response message for +// listing DLP jobs. +type GooglePrivacyDlpV2ListDlpJobsResponse struct { + // Jobs: A list of DlpJobs that matches the specified filter in the + // request. + Jobs []*GooglePrivacyDlpV2DlpJob `json:"jobs,omitempty"` + + // NextPageToken: The standard List next-page token. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Jobs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Jobs") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ListDlpJobsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ListDlpJobsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ListInfoTypesResponse: Response to the +// ListInfoTypes request. +type GooglePrivacyDlpV2ListInfoTypesResponse struct { + // InfoTypes: Set of sensitive infoTypes. + InfoTypes []*GooglePrivacyDlpV2InfoTypeDescription `json:"infoTypes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "InfoTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InfoTypes") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ListInfoTypesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ListInfoTypesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ListInspectTemplatesResponse: Response message for +// ListInspectTemplates. 
+type GooglePrivacyDlpV2ListInspectTemplatesResponse struct { + // InspectTemplates: List of inspectTemplates, up to page_size in + // ListInspectTemplatesRequest. + InspectTemplates []*GooglePrivacyDlpV2InspectTemplate `json:"inspectTemplates,omitempty"` + + // NextPageToken: If the next page is available then the next page token + // to be used + // in following ListInspectTemplates request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "InspectTemplates") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InspectTemplates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ListInspectTemplatesResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ListInspectTemplatesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ListJobTriggersResponse: Response message for +// ListJobTriggers. 
+type GooglePrivacyDlpV2ListJobTriggersResponse struct { + // JobTriggers: List of triggeredJobs, up to page_size in + // ListJobTriggersRequest. + JobTriggers []*GooglePrivacyDlpV2JobTrigger `json:"jobTriggers,omitempty"` + + // NextPageToken: If the next page is available then the next page token + // to be used + // in following ListJobTriggers request. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "JobTriggers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "JobTriggers") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ListJobTriggersResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ListJobTriggersResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Location: Specifies the location of the finding. +type GooglePrivacyDlpV2Location struct { + // ByteRange: Zero-based byte offsets delimiting the finding. + // These are relative to the finding's containing element. 
+ // Note that when the content is not textual, this references + // the UTF-8 encoded textual representation of the content. + // Omitted if content is an image. + ByteRange *GooglePrivacyDlpV2Range `json:"byteRange,omitempty"` + + // CodepointRange: Unicode character offsets delimiting the + // finding. + // These are relative to the finding's containing element. + // Provided when the content is text. + CodepointRange *GooglePrivacyDlpV2Range `json:"codepointRange,omitempty"` + + // ContentLocations: List of nested objects pointing to the precise + // location of the finding + // within the file or record. + ContentLocations []*GooglePrivacyDlpV2ContentLocation `json:"contentLocations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ByteRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ByteRange") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Location) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Location + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2NumericalStatsConfig: Compute numerical stats over +// an individual column, including +// min, max, and quantiles. 
+type GooglePrivacyDlpV2NumericalStatsConfig struct { + // Field: Field to compute numerical stats on. Supported types + // are + // integer, float, date, datetime, timestamp, time. + Field *GooglePrivacyDlpV2FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2NumericalStatsConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2NumericalStatsConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2NumericalStatsResult: Result of the numerical stats +// computation. +type GooglePrivacyDlpV2NumericalStatsResult struct { + // MaxValue: Maximum value appearing in the column. + MaxValue *GooglePrivacyDlpV2Value `json:"maxValue,omitempty"` + + // MinValue: Minimum value appearing in the column. + MinValue *GooglePrivacyDlpV2Value `json:"minValue,omitempty"` + + // QuantileValues: List of 99 values that partition the set of field + // values into 100 equal + // sized buckets. 
+ QuantileValues []*GooglePrivacyDlpV2Value `json:"quantileValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2NumericalStatsResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2NumericalStatsResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2OutputStorageConfig: Cloud repository for storing +// output. +type GooglePrivacyDlpV2OutputStorageConfig struct { + // OutputSchema: Schema used for writing the findings. Columns are + // derived from the + // `Finding` object. If appending to an existing table, any columns from + // the + // predefined schema that are missing will be added. No columns in + // the + // existing table will be deleted. + // + // If unspecified, then all available columns will be used for a new + // table, + // and no changes will be made to an existing table. + // + // Possible values: + // "OUTPUT_SCHEMA_UNSPECIFIED" + // "BASIC_COLUMNS" - Basic schema including only `info_type`, `quote`, + // `certainty`, and + // `timestamp`. 
+ // "GCS_COLUMNS" - Schema tailored to findings from scanning Google + // Cloud Storage. + // "DATASTORE_COLUMNS" - Schema tailored to findings from scanning + // Google Datastore. + // "BIG_QUERY_COLUMNS" - Schema tailored to findings from scanning + // Google BigQuery. + // "ALL_COLUMNS" - Schema containing all columns. + OutputSchema string `json:"outputSchema,omitempty"` + + // Table: Store findings in an existing table or a new table in an + // existing + // dataset. Each column in an existing table must have the same name, + // type, + // and mode of a field in the `Finding` object. If table_id is not set a + // new + // one will be generated for you with the following + // format: + // dlp_googleapis_yyyy_mm_dd_[dlp_job_id]. Pacific timezone will be used + // for + // generating the date details. + Table *GooglePrivacyDlpV2BigQueryTable `json:"table,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OutputSchema") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OutputSchema") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2OutputStorageConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2OutputStorageConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2PartitionId: Datastore partition ID. +// A partition ID identifies a grouping of entities. The grouping is +// always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +type GooglePrivacyDlpV2PartitionId struct { + // NamespaceId: If not empty, the ID of the namespace to which the + // entities belong. + NamespaceId string `json:"namespaceId,omitempty"` + + // ProjectId: The ID of the project to which the entities belong. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NamespaceId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NamespaceId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2PartitionId) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2PartitionId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2PathElement: A (kind, ID/name) pair used to +// construct a key path. +// +// If either name or ID is set, the element is complete. +// If neither is set, the element is incomplete. +type GooglePrivacyDlpV2PathElement struct { + // Id: The auto-allocated ID of the entity. + // Never equal to zero. Values less than zero are discouraged and may + // not + // be supported in the future. + Id int64 `json:"id,omitempty,string"` + + // Kind: The kind of the entity. + // A kind matching regex `__.*__` is reserved/read-only. + // A kind must not contain more than 1500 bytes when UTF-8 + // encoded. + // Cannot be "". + Kind string `json:"kind,omitempty"` + + // Name: The name of the entity. + // A name matching regex `__.*__` is reserved/read-only. + // A name must not be more than 1500 bytes when UTF-8 encoded. + // Cannot be "". + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2PathElement) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2PathElement + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2PrimitiveTransformation: A rule for transforming a +// value. +type GooglePrivacyDlpV2PrimitiveTransformation struct { + BucketingConfig *GooglePrivacyDlpV2BucketingConfig `json:"bucketingConfig,omitempty"` + + CharacterMaskConfig *GooglePrivacyDlpV2CharacterMaskConfig `json:"characterMaskConfig,omitempty"` + + CryptoHashConfig *GooglePrivacyDlpV2CryptoHashConfig `json:"cryptoHashConfig,omitempty"` + + CryptoReplaceFfxFpeConfig *GooglePrivacyDlpV2CryptoReplaceFfxFpeConfig `json:"cryptoReplaceFfxFpeConfig,omitempty"` + + DateShiftConfig *GooglePrivacyDlpV2DateShiftConfig `json:"dateShiftConfig,omitempty"` + + FixedSizeBucketingConfig *GooglePrivacyDlpV2FixedSizeBucketingConfig `json:"fixedSizeBucketingConfig,omitempty"` + + RedactConfig *GooglePrivacyDlpV2RedactConfig `json:"redactConfig,omitempty"` + + ReplaceConfig *GooglePrivacyDlpV2ReplaceValueConfig `json:"replaceConfig,omitempty"` + + ReplaceWithInfoTypeConfig *GooglePrivacyDlpV2ReplaceWithInfoTypeConfig `json:"replaceWithInfoTypeConfig,omitempty"` + + TimePartConfig *GooglePrivacyDlpV2TimePartConfig `json:"timePartConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketingConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"BucketingConfig") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2PrimitiveTransformation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2PrimitiveTransformation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2PrivacyMetric: Privacy metric to compute for +// reidentification risk analysis. +type GooglePrivacyDlpV2PrivacyMetric struct { + CategoricalStatsConfig *GooglePrivacyDlpV2CategoricalStatsConfig `json:"categoricalStatsConfig,omitempty"` + + KAnonymityConfig *GooglePrivacyDlpV2KAnonymityConfig `json:"kAnonymityConfig,omitempty"` + + KMapEstimationConfig *GooglePrivacyDlpV2KMapEstimationConfig `json:"kMapEstimationConfig,omitempty"` + + LDiversityConfig *GooglePrivacyDlpV2LDiversityConfig `json:"lDiversityConfig,omitempty"` + + NumericalStatsConfig *GooglePrivacyDlpV2NumericalStatsConfig `json:"numericalStatsConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "CategoricalStatsConfig") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CategoricalStatsConfig") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. 
However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2PrivacyMetric) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2PrivacyMetric + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Proximity: Message for specifying a window around a +// finding to apply a detection +// rule. +type GooglePrivacyDlpV2Proximity struct { + // WindowAfter: Number of characters after the finding to consider. + WindowAfter int64 `json:"windowAfter,omitempty"` + + // WindowBefore: Number of characters before the finding to consider. + WindowBefore int64 `json:"windowBefore,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WindowAfter") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WindowAfter") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Proximity) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Proximity + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2PublishToPubSub: Publish the results of a DlpJob to +// a pub sub channel. +// Compatible with: Inpect, Risk +type GooglePrivacyDlpV2PublishToPubSub struct { + // Topic: Cloud Pub/Sub topic to send notifications to. The topic must + // have given + // publishing access rights to the DLP API service account executing + // the long running DlpJob sending the notifications. + // Format is projects/{project}/topics/{topic}. + Topic string `json:"topic,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Topic") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Topic") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2PublishToPubSub) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2PublishToPubSub + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2QuasiIdField: A quasi-identifier column has a +// custom_tag, used to know which column +// in the data corresponds to which column in the statistical model. +type GooglePrivacyDlpV2QuasiIdField struct { + CustomTag string `json:"customTag,omitempty"` + + Field *GooglePrivacyDlpV2FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomTag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomTag") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2QuasiIdField) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2QuasiIdField + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2QuoteInfo: Message for infoType-dependent details +// parsed from quote. +type GooglePrivacyDlpV2QuoteInfo struct { + DateTime *GooglePrivacyDlpV2DateTime `json:"dateTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"DateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2QuoteInfo) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2QuoteInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Range: Generic half-open interval [start, end) +type GooglePrivacyDlpV2Range struct { + // End: Index of the last character of the range (exclusive). + End int64 `json:"end,omitempty,string"` + + // Start: Index of the first character of the range (inclusive). + Start int64 `json:"start,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "End") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "End") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Range) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Range + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RecordCondition: A condition for determining +// whether a transformation should be applied to +// a field. +type GooglePrivacyDlpV2RecordCondition struct { + // Expressions: An expression. + Expressions *GooglePrivacyDlpV2Expressions `json:"expressions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Expressions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Expressions") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RecordCondition) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RecordCondition + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RecordKey: Message for a unique key indicating a +// record that contains a finding. +type GooglePrivacyDlpV2RecordKey struct { + BigQueryKey *GooglePrivacyDlpV2BigQueryKey `json:"bigQueryKey,omitempty"` + + DatastoreKey *GooglePrivacyDlpV2DatastoreKey `json:"datastoreKey,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BigQueryKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BigQueryKey") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RecordKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RecordKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RecordLocation: Location of a finding within a row +// or record. +type GooglePrivacyDlpV2RecordLocation struct { + // FieldId: Field id of the field containing the finding. 
+ FieldId *GooglePrivacyDlpV2FieldId `json:"fieldId,omitempty"` + + // RecordKey: Key of the finding. + RecordKey *GooglePrivacyDlpV2RecordKey `json:"recordKey,omitempty"` + + // TableLocation: Location within a `ContentItem.Table`. + TableLocation *GooglePrivacyDlpV2TableLocation `json:"tableLocation,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FieldId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FieldId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RecordLocation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RecordLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RecordSuppression: Configuration to suppress +// records whose suppression conditions evaluate to +// true. +type GooglePrivacyDlpV2RecordSuppression struct { + // Condition: A condition that when it evaluates to true will result in + // the record being + // evaluated to be suppressed from the transformed content. + Condition *GooglePrivacyDlpV2RecordCondition `json:"condition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Condition") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Condition") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RecordSuppression) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RecordSuppression + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RecordTransformations: A type of transformation +// that is applied over structured data such as a +// table. +type GooglePrivacyDlpV2RecordTransformations struct { + // FieldTransformations: Transform the record by applying various field + // transformations. + FieldTransformations []*GooglePrivacyDlpV2FieldTransformation `json:"fieldTransformations,omitempty"` + + // RecordSuppressions: Configuration defining which records get + // suppressed entirely. Records that + // match any suppression rule are omitted from the output [optional]. + RecordSuppressions []*GooglePrivacyDlpV2RecordSuppression `json:"recordSuppressions,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "FieldTransformations") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. 
+ // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FieldTransformations") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RecordTransformations) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RecordTransformations + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RedactConfig: Redact a given value. For example, if +// used with an `InfoTypeTransformation` +// transforming PHONE_NUMBER, and input 'My phone number is +// 206-555-0123', the +// output would be 'My phone number is '. +type GooglePrivacyDlpV2RedactConfig struct { +} + +// GooglePrivacyDlpV2RedactImageRequest: Request to search for +// potentially sensitive info in a list of items +// and replace it with a default or provided content. +type GooglePrivacyDlpV2RedactImageRequest struct { + // ByteItem: The content must be PNG, JPEG, SVG or BMP. + ByteItem *GooglePrivacyDlpV2ByteContentItem `json:"byteItem,omitempty"` + + // ImageRedactionConfigs: The configuration for specifying what content + // to redact from images. + ImageRedactionConfigs []*GooglePrivacyDlpV2ImageRedactionConfig `json:"imageRedactionConfigs,omitempty"` + + // InspectConfig: Configuration for the inspector. 
+ InspectConfig *GooglePrivacyDlpV2InspectConfig `json:"inspectConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ByteItem") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ByteItem") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RedactImageRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RedactImageRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RedactImageResponse: Results of redacting an image. +type GooglePrivacyDlpV2RedactImageResponse struct { + // ExtractedText: If an image was being inspected and the + // InspectConfig's include_quote was + // set to true, then this field will include all text, if any, that was + // found + // in the image. + ExtractedText string `json:"extractedText,omitempty"` + + // RedactedImage: The redacted image. The type will be the same as the + // original image. + RedactedImage string `json:"redactedImage,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. 
"ExtractedText") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ExtractedText") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RedactImageResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RedactImageResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Regex: Message defining a custom regular +// expression. +type GooglePrivacyDlpV2Regex struct { + // Pattern: Pattern defining the regular expression. + Pattern string `json:"pattern,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Pattern") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Pattern") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Regex) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Regex + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ReidentifyContentRequest: Request to re-identify an +// item. +type GooglePrivacyDlpV2ReidentifyContentRequest struct { + // InspectConfig: Configuration for the inspector. + InspectConfig *GooglePrivacyDlpV2InspectConfig `json:"inspectConfig,omitempty"` + + // InspectTemplateName: Optional template to use. Any configuration + // directly specified in + // `inspect_config` will override those set in the template. Singular + // fields + // that are set in this request will replace their corresponding fields + // in the + // template. Repeated fields are appended. Singular sub-messages and + // groups + // are recursively merged. + InspectTemplateName string `json:"inspectTemplateName,omitempty"` + + // Item: The item to re-identify. Will be treated as text. + Item *GooglePrivacyDlpV2ContentItem `json:"item,omitempty"` + + // ReidentifyConfig: Configuration for the re-identification of the + // content item. + // This field shares the same proto message type that is used + // for + // de-identification, however its usage here is for the reversal of + // the + // previous de-identification. Re-identification is performed by + // examining + // the transformations used to de-identify the items and executing + // the + // reverse. This requires that only reversible transformations + // be provided here. 
The reversible transformations are: + // + // - `CryptoReplaceFfxFpeConfig` + ReidentifyConfig *GooglePrivacyDlpV2DeidentifyConfig `json:"reidentifyConfig,omitempty"` + + // ReidentifyTemplateName: Optional template to use. References an + // instance of `DeidentifyTemplate`. + // Any configuration directly specified in `reidentify_config` + // or + // `inspect_config` will override those set in the template. Singular + // fields + // that are set in this request will replace their corresponding fields + // in the + // template. Repeated fields are appended. Singular sub-messages and + // groups + // are recursively merged. + ReidentifyTemplateName string `json:"reidentifyTemplateName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InspectConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InspectConfig") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ReidentifyContentRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ReidentifyContentRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ReidentifyContentResponse: Results of +// re-identifying a item. 
+type GooglePrivacyDlpV2ReidentifyContentResponse struct { + // Item: The re-identified item. + Item *GooglePrivacyDlpV2ContentItem `json:"item,omitempty"` + + // Overview: An overview of the changes that were made to the `item`. + Overview *GooglePrivacyDlpV2TransformationOverview `json:"overview,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Item") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Item") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ReidentifyContentResponse) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ReidentifyContentResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ReplaceValueConfig: Replace each input value with a +// given `Value`. +type GooglePrivacyDlpV2ReplaceValueConfig struct { + // NewValue: Value to replace it with. + NewValue *GooglePrivacyDlpV2Value `json:"newValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NewValue") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NewValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ReplaceValueConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ReplaceValueConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2ReplaceWithInfoTypeConfig: Replace each matching +// finding with the name of the info_type. +type GooglePrivacyDlpV2ReplaceWithInfoTypeConfig struct { +} + +type GooglePrivacyDlpV2RequestedOptions struct { + JobConfig *GooglePrivacyDlpV2InspectJobConfig `json:"jobConfig,omitempty"` + + // SnapshotInspectTemplate: If run with an inspect template, a snapshot + // of it's state at the time of + // this run. + SnapshotInspectTemplate *GooglePrivacyDlpV2InspectTemplate `json:"snapshotInspectTemplate,omitempty"` + + // ForceSendFields is a list of field names (e.g. "JobConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "JobConfig") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RequestedOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RequestedOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2Result struct { + // InfoTypeStats: Statistics of how many instances of each info type + // were found during + // inspect job. + InfoTypeStats []*GooglePrivacyDlpV2InfoTypeStats `json:"infoTypeStats,omitempty"` + + // ProcessedBytes: Total size in bytes that were processed. + ProcessedBytes int64 `json:"processedBytes,omitempty,string"` + + // TotalEstimatedBytes: Estimate of the number of bytes to process. + TotalEstimatedBytes int64 `json:"totalEstimatedBytes,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "InfoTypeStats") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InfoTypeStats") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Result) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Result + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2RiskAnalysisJobConfig: Configuration for a risk +// analysis job. +type GooglePrivacyDlpV2RiskAnalysisJobConfig struct { + // Actions: Actions to execute at the completion of the job. Are + // executed in the order + // provided. + Actions []*GooglePrivacyDlpV2Action `json:"actions,omitempty"` + + // PrivacyMetric: Privacy metric to compute. + PrivacyMetric *GooglePrivacyDlpV2PrivacyMetric `json:"privacyMetric,omitempty"` + + // SourceTable: Input dataset to compute metrics over. + SourceTable *GooglePrivacyDlpV2BigQueryTable `json:"sourceTable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Actions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Actions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2RiskAnalysisJobConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2RiskAnalysisJobConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2Row struct { + Values []*GooglePrivacyDlpV2Value `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Values") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Values") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Row) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Row + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2SaveFindings: If set, the detailed findings will be +// persisted to the specified +// OutputStorageConfig. Compatible with: Inspect +type GooglePrivacyDlpV2SaveFindings struct { + OutputConfig *GooglePrivacyDlpV2OutputStorageConfig `json:"outputConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OutputConfig") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OutputConfig") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2SaveFindings) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2SaveFindings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Schedule: Schedule for triggeredJobs. +type GooglePrivacyDlpV2Schedule struct { + // RecurrencePeriodDuration: With this option a job is started a regular + // periodic basis. For + // example: every 10 minutes. + // + // A scheduled start time will be skipped if the previous + // execution has not ended when its scheduled time occurs. + // + // This value must be set to a time duration greater than or equal + // to 60 minutes and can be no longer than 60 days. + RecurrencePeriodDuration string `json:"recurrencePeriodDuration,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "RecurrencePeriodDuration") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RecurrencePeriodDuration") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Schedule) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Schedule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2StorageConfig: Shared message indicating Cloud +// storage type. +type GooglePrivacyDlpV2StorageConfig struct { + // BigQueryOptions: BigQuery options specification. + BigQueryOptions *GooglePrivacyDlpV2BigQueryOptions `json:"bigQueryOptions,omitempty"` + + // CloudStorageOptions: Google Cloud Storage options specification. + CloudStorageOptions *GooglePrivacyDlpV2CloudStorageOptions `json:"cloudStorageOptions,omitempty"` + + // DatastoreOptions: Google Cloud Datastore options specification. + DatastoreOptions *GooglePrivacyDlpV2DatastoreOptions `json:"datastoreOptions,omitempty"` + + TimespanConfig *GooglePrivacyDlpV2TimespanConfig `json:"timespanConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BigQueryOptions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"BigQueryOptions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2StorageConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2StorageConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2SummaryResult: A collection that informs the user +// the number of times a particular +// `TransformationResultCode` and error details occurred. +type GooglePrivacyDlpV2SummaryResult struct { + // Possible values: + // "TRANSFORMATION_RESULT_CODE_UNSPECIFIED" + // "SUCCESS" + // "ERROR" + Code string `json:"code,omitempty"` + + Count int64 `json:"count,omitempty,string"` + + // Details: A place for warnings or errors to show up if a + // transformation didn't + // work as expected. + Details string `json:"details,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2SummaryResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2SummaryResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2SurrogateType: Message for detecting output from +// deidentification transformations +// such +// as +// [`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2/content/d +// eidentify#CryptoReplaceFfxFpeConfig). +// These types of transformations are +// those that perform pseudonymization, thereby producing a "surrogate" +// as +// output. This should be used in conjunction with a field on +// the +// transformation such as `surrogate_info_type`. This custom info type +// does +// not support the use of `detection_rules`. +type GooglePrivacyDlpV2SurrogateType struct { +} + +// GooglePrivacyDlpV2Table: Structured content to inspect. Up to 50,000 +// `Value`s per request allowed. +type GooglePrivacyDlpV2Table struct { + Headers []*GooglePrivacyDlpV2FieldId `json:"headers,omitempty"` + + Rows []*GooglePrivacyDlpV2Row `json:"rows,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Headers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Headers") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Table) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Table + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TableLocation: Location of a finding within a +// table. +type GooglePrivacyDlpV2TableLocation struct { + // RowIndex: The zero-based index of the row where the finding is + // located. + RowIndex int64 `json:"rowIndex,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "RowIndex") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RowIndex") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TableLocation) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TableLocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TaggedField: A column with a semantic tag attached. 
+type GooglePrivacyDlpV2TaggedField struct { + // CustomTag: A column can be tagged with a custom tag. In this case, + // the user must + // indicate an auxiliary table that contains statistical information + // on + // the possible values of this column (below). + CustomTag string `json:"customTag,omitempty"` + + // Field: Identifies the column. [required] + Field *GooglePrivacyDlpV2FieldId `json:"field,omitempty"` + + // Inferred: If no semantic tag is indicated, we infer the statistical + // model from + // the distribution of values in the input data + Inferred *GoogleProtobufEmpty `json:"inferred,omitempty"` + + // InfoType: A column can be tagged with a InfoType to use the relevant + // public + // dataset as a statistical model of population, if available. + // We + // currently support US ZIP codes, region codes, ages and genders. + // To programmatically obtain the list of supported InfoTypes, + // use + // ListInfoTypes with the supported_by=RISK_ANALYSIS filter. + InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomTag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomTag") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TaggedField) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TaggedField + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TimePartConfig: For use with `Date`, `Timestamp`, +// and `TimeOfDay`, extract or preserve a +// portion of the value. +type GooglePrivacyDlpV2TimePartConfig struct { + // Possible values: + // "TIME_PART_UNSPECIFIED" + // "YEAR" - [0-9999] + // "MONTH" - [1-12] + // "DAY_OF_MONTH" - [1-31] + // "DAY_OF_WEEK" - [1-7] + // "WEEK_OF_YEAR" - [1-52] + // "HOUR_OF_DAY" - [0-23] + PartToExtract string `json:"partToExtract,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PartToExtract") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PartToExtract") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TimePartConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TimePartConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type GooglePrivacyDlpV2TimeZone struct { + // OffsetMinutes: Set only if the offset can be determined. Positive for + // time ahead of UTC. 
+ // E.g. For "UTC-9", this value is -540. + OffsetMinutes int64 `json:"offsetMinutes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OffsetMinutes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OffsetMinutes") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TimeZone) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TimeZone + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TimespanConfig: Configuration of the timespan of +// the items to include in scanning. +// Currently only supported when inspecting Google Cloud Storage and +// BigQuery. +type GooglePrivacyDlpV2TimespanConfig struct { + // EnableAutoPopulationOfTimespanConfig: When the job is started by a + // JobTrigger we will automatically figure out + // a valid start_time to avoid scanning files that have not been + // modified + // since the last time the JobTrigger executed. This will be based on + // the + // time of the execution of the last run of the JobTrigger. + EnableAutoPopulationOfTimespanConfig bool `json:"enableAutoPopulationOfTimespanConfig,omitempty"` + + // EndTime: Exclude files newer than this value. 
+ // If set to zero, no upper time limit is applied. + EndTime string `json:"endTime,omitempty"` + + // StartTime: Exclude files older than this value. + StartTime string `json:"startTime,omitempty"` + + // TimestampField: Specification of the field containing the timestamp + // of scanned items. + // Required for data sources like Datastore or BigQuery. + // The valid data types of the timestamp field are: + // for BigQuery - timestamp, date, datetime; + // for Datastore - timestamp. + // Datastore entity will be scanned if the timestamp property does not + // exist + // or its value is empty or invalid. + TimestampField *GooglePrivacyDlpV2FieldId `json:"timestampField,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableAutoPopulationOfTimespanConfig") to unconditionally include in + // API requests. By default, fields with empty values are omitted from + // API requests. However, any non-pointer, non-interface field appearing + // in ForceSendFields will be sent to the server regardless of whether + // the field is empty or not. This may be used to include empty fields + // in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "EnableAutoPopulationOfTimespanConfig") to include in API requests + // with the JSON null value. By default, fields with empty values are + // omitted from API requests. However, any field with an empty value + // appearing in NullFields will be sent to the server as null. It is an + // error if a field in this list has a non-empty value. This may be used + // to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TimespanConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TimespanConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TransformationOverview: Overview of the +// modifications that occurred. 
+type GooglePrivacyDlpV2TransformationOverview struct { + // TransformationSummaries: Transformations applied to the dataset. + TransformationSummaries []*GooglePrivacyDlpV2TransformationSummary `json:"transformationSummaries,omitempty"` + + // TransformedBytes: Total size in bytes that were transformed in some + // way. + TransformedBytes int64 `json:"transformedBytes,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. + // "TransformationSummaries") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "TransformationSummaries") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TransformationOverview) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TransformationOverview + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TransformationSummary: Summary of a single +// tranformation. +// Only one of 'transformation', 'field_transformation', or +// 'record_suppress' +// will be set. +type GooglePrivacyDlpV2TransformationSummary struct { + // Field: Set if the transformation was limited to a specific FieldId. 
+ Field *GooglePrivacyDlpV2FieldId `json:"field,omitempty"` + + // FieldTransformations: The field transformation that was applied. + // If multiple field transformations are requested for a single + // field, + // this list will contain all of them; otherwise, only one is supplied. + FieldTransformations []*GooglePrivacyDlpV2FieldTransformation `json:"fieldTransformations,omitempty"` + + // InfoType: Set if the transformation was limited to a specific + // info_type. + InfoType *GooglePrivacyDlpV2InfoType `json:"infoType,omitempty"` + + // RecordSuppress: The specific suppression option these stats apply to. + RecordSuppress *GooglePrivacyDlpV2RecordSuppression `json:"recordSuppress,omitempty"` + + Results []*GooglePrivacyDlpV2SummaryResult `json:"results,omitempty"` + + // Transformation: The specific transformation these stats apply to. + Transformation *GooglePrivacyDlpV2PrimitiveTransformation `json:"transformation,omitempty"` + + // TransformedBytes: Total size in bytes that were transformed in some + // way. + TransformedBytes int64 `json:"transformedBytes,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TransformationSummary) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TransformationSummary + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2TransientCryptoKey: Use this to have a random data +// crypto key generated. +// It will be discarded after the request finishes. +type GooglePrivacyDlpV2TransientCryptoKey struct { + // Name: Name of the key. [required] + // This is an arbitrary string used to differentiate different keys. + // A unique key is generated per name: two separate + // `TransientCryptoKey` + // protos share the same generated key if their names are the same. + // When the data crypto key is generated, this name is not used in any + // way + // (repeating the api call will result in a different key being + // generated). + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2TransientCryptoKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2TransientCryptoKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Trigger: What event needs to occur for a new job to +// be started. +type GooglePrivacyDlpV2Trigger struct { + // Schedule: Create a job on a repeating basis based on the elapse of + // time. + Schedule *GooglePrivacyDlpV2Schedule `json:"schedule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Schedule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Schedule") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Trigger) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Trigger + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2UnwrappedCryptoKey: Using raw keys is prone to +// security risks due to accidentally +// leaking the key. Choose another type of key if possible. +type GooglePrivacyDlpV2UnwrappedCryptoKey struct { + // Key: The AES 128/192/256 bit key. 
[required] + Key string `json:"key,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2UnwrappedCryptoKey) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2UnwrappedCryptoKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest: Request message +// for UpdateDeidentifyTemplate. +type GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest struct { + // DeidentifyTemplate: New DeidentifyTemplate value. + DeidentifyTemplate *GooglePrivacyDlpV2DeidentifyTemplate `json:"deidentifyTemplate,omitempty"` + + // UpdateMask: Mask to control which fields get updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DeidentifyTemplate") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DeidentifyTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2UpdateInspectTemplateRequest: Request message for +// UpdateInspectTemplate. +type GooglePrivacyDlpV2UpdateInspectTemplateRequest struct { + // InspectTemplate: New InspectTemplate value. + InspectTemplate *GooglePrivacyDlpV2InspectTemplate `json:"inspectTemplate,omitempty"` + + // UpdateMask: Mask to control which fields get updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InspectTemplate") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InspectTemplate") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2UpdateInspectTemplateRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2UpdateInspectTemplateRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2UpdateJobTriggerRequest: Request message for +// UpdateJobTrigger. +type GooglePrivacyDlpV2UpdateJobTriggerRequest struct { + // JobTrigger: New JobTrigger value. + JobTrigger *GooglePrivacyDlpV2JobTrigger `json:"jobTrigger,omitempty"` + + // UpdateMask: Mask to control which fields get updated. + UpdateMask string `json:"updateMask,omitempty"` + + // ForceSendFields is a list of field names (e.g. "JobTrigger") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "JobTrigger") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2UpdateJobTriggerRequest) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2UpdateJobTriggerRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2Value: Set of primitive values supported by the +// system. +// Note that for the purposes of inspection or transformation, the +// number +// of bytes considered to comprise a 'Value' is based on its +// representation +// as a UTF-8 encoded string. For example, if 'integer_value' is set +// to +// 123456789, the number of bytes would be counted as 9, even though +// an +// int64 only holds up to 8 bytes of data. +type GooglePrivacyDlpV2Value struct { + BooleanValue bool `json:"booleanValue,omitempty"` + + DateValue *GoogleTypeDate `json:"dateValue,omitempty"` + + // Possible values: + // "DAY_OF_WEEK_UNSPECIFIED" - The unspecified day-of-week. + // "MONDAY" - The day-of-week of Monday. + // "TUESDAY" - The day-of-week of Tuesday. + // "WEDNESDAY" - The day-of-week of Wednesday. + // "THURSDAY" - The day-of-week of Thursday. + // "FRIDAY" - The day-of-week of Friday. + // "SATURDAY" - The day-of-week of Saturday. + // "SUNDAY" - The day-of-week of Sunday. + DayOfWeekValue string `json:"dayOfWeekValue,omitempty"` + + FloatValue float64 `json:"floatValue,omitempty"` + + IntegerValue int64 `json:"integerValue,omitempty,string"` + + StringValue string `json:"stringValue,omitempty"` + + TimeValue *GoogleTypeTimeOfDay `json:"timeValue,omitempty"` + + TimestampValue string `json:"timestampValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BooleanValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BooleanValue") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2Value) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2Value + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GooglePrivacyDlpV2Value) UnmarshalJSON(data []byte) error { + type NoMethod GooglePrivacyDlpV2Value + var s1 struct { + FloatValue gensupport.JSONFloat64 `json:"floatValue"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FloatValue = float64(s1.FloatValue) + return nil +} + +// GooglePrivacyDlpV2ValueFrequency: A value of a field, including its +// frequency. +type GooglePrivacyDlpV2ValueFrequency struct { + // Count: How many times the value is contained in the field. + Count int64 `json:"count,omitempty,string"` + + // Value: A value contained in the field in question. + Value *GooglePrivacyDlpV2Value `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2ValueFrequency) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2ValueFrequency + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2WordList: Message defining a list of words or +// phrases to search for in the data. +type GooglePrivacyDlpV2WordList struct { + // Words: Words or phrases defining the dictionary. The dictionary must + // contain + // at least one phrase and every phrase must contain at least 2 + // characters + // that are letters or digits. [required] + Words []string `json:"words,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Words") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Words") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2WordList) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2WordList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1AuxiliaryTable: An auxiliary table contains +// statistical information on the relative +// frequency of different quasi-identifiers values. It has one or +// several +// quasi-identifiers columns, and one column that indicates the +// relative +// frequency of each quasi-identifier tuple. +// If a tuple is present in the data but not in the auxiliary table, +// the +// corresponding relative frequency is assumed to be zero (and thus, +// the +// tuple is highly reidentifiable). +type GooglePrivacyDlpV2beta1AuxiliaryTable struct { + // QuasiIds: Quasi-identifier columns. [required] + QuasiIds []*GooglePrivacyDlpV2beta1QuasiIdField `json:"quasiIds,omitempty"` + + // RelativeFrequency: The relative frequency column must contain a + // floating-point number + // between 0 and 1 (inclusive). Null values are assumed to be + // zero. + // [required] + RelativeFrequency *GooglePrivacyDlpV2beta1FieldId `json:"relativeFrequency,omitempty"` + + // Table: Auxiliary table location. [required] + Table *GooglePrivacyDlpV2beta1BigQueryTable `json:"table,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuasiIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuasiIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1AuxiliaryTable) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1AuxiliaryTable + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1BigQueryOptions: Options defining BigQuery +// table and row identifiers. +type GooglePrivacyDlpV2beta1BigQueryOptions struct { + // IdentifyingFields: References to fields uniquely identifying rows + // within the table. + // Nested fields in the format, like `person.birthdate.year`, are + // allowed. + IdentifyingFields []*GooglePrivacyDlpV2beta1FieldId `json:"identifyingFields,omitempty"` + + // TableReference: Complete BigQuery table reference. + TableReference *GooglePrivacyDlpV2beta1BigQueryTable `json:"tableReference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IdentifyingFields") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IdentifyingFields") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1BigQueryOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1BigQueryOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1BigQueryTable: Message defining the location +// of a BigQuery table. A table is uniquely +// identified by its project_id, dataset_id, and table_name. Within a +// query +// a table is often referenced with a string in the format +// of: +// `:.` +// or +// `..`. +type GooglePrivacyDlpV2beta1BigQueryTable struct { + // DatasetId: Dataset ID of the table. + DatasetId string `json:"datasetId,omitempty"` + + // ProjectId: The Google Cloud Platform project ID of the project + // containing the table. + // If omitted, project ID is inferred from the API call. + ProjectId string `json:"projectId,omitempty"` + + // TableId: Name of the table. + TableId string `json:"tableId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DatasetId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DatasetId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1BigQueryTable) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1BigQueryTable + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1CategoricalStatsConfig: Compute numerical +// stats over an individual column, including +// number of distinct values and value count distribution. +type GooglePrivacyDlpV2beta1CategoricalStatsConfig struct { + // Field: Field to compute categorical stats on. All column types + // are + // supported except for arrays and structs. However, it may be + // more + // informative to use NumericalStats when the field type is + // supported, + // depending on the data. + Field *GooglePrivacyDlpV2beta1FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1CategoricalStatsConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1CategoricalStatsConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket: Histogram +// bucket of value frequencies in the column. +type GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket struct { + // BucketSize: Total number of records in this bucket. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValues: Sample of value frequencies in this bucket. The total + // number of + // values returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2beta1ValueFrequency `json:"bucketValues,omitempty"` + + // ValueFrequencyLowerBound: Lower bound on the value frequency of the + // values in this bucket. + ValueFrequencyLowerBound int64 `json:"valueFrequencyLowerBound,omitempty,string"` + + // ValueFrequencyUpperBound: Upper bound on the value frequency of the + // values in this bucket. + ValueFrequencyUpperBound int64 `json:"valueFrequencyUpperBound,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. 
It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1CategoricalStatsResult: Result of the +// categorical stats computation. +type GooglePrivacyDlpV2beta1CategoricalStatsResult struct { + // ValueFrequencyHistogramBuckets: Histogram of value frequencies in the + // column. + ValueFrequencyHistogramBuckets []*GooglePrivacyDlpV2beta1CategoricalStatsHistogramBucket `json:"valueFrequencyHistogramBuckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ValueFrequencyHistogramBuckets") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "ValueFrequencyHistogramBuckets") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1CategoricalStatsResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1CategoricalStatsResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1CloudStorageOptions: Options defining a file +// or a set of files (path ending with *) within +// a Google Cloud Storage bucket. +type GooglePrivacyDlpV2beta1CloudStorageOptions struct { + FileSet *GooglePrivacyDlpV2beta1FileSet `json:"fileSet,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FileSet") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FileSet") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1CloudStorageOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1CloudStorageOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1CloudStoragePath: A location in Cloud Storage. +type GooglePrivacyDlpV2beta1CloudStoragePath struct { + // Path: The url, in the format of `gs://bucket/`. 
+ Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Path") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Path") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1CloudStoragePath) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1CloudStoragePath + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1CustomInfoType: Custom information type +// provided by the user. Used to find domain-specific +// sensitive information configurable to the data in question. +type GooglePrivacyDlpV2beta1CustomInfoType struct { + // Dictionary: Dictionary-based custom info type. + Dictionary *GooglePrivacyDlpV2beta1Dictionary `json:"dictionary,omitempty"` + + // InfoType: Info type configuration. All custom info types must have + // configurations + // that do not conflict with built-in info types or other custom info + // types. + InfoType *GooglePrivacyDlpV2beta1InfoType `json:"infoType,omitempty"` + + // SurrogateType: Surrogate info type. + SurrogateType *GooglePrivacyDlpV2beta1SurrogateType `json:"surrogateType,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Dictionary") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Dictionary") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1CustomInfoType) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1CustomInfoType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1DatastoreOptions: Options defining a data set +// within Google Cloud Datastore. +type GooglePrivacyDlpV2beta1DatastoreOptions struct { + // Kind: The kind to process. + Kind *GooglePrivacyDlpV2beta1KindExpression `json:"kind,omitempty"` + + // PartitionId: A partition ID identifies a grouping of entities. The + // grouping is always + // by project and namespace, however the namespace ID may be empty. + PartitionId *GooglePrivacyDlpV2beta1PartitionId `json:"partitionId,omitempty"` + + // Projection: Properties to scan. If none are specified, all properties + // will be scanned + // by default. + Projection []*GooglePrivacyDlpV2beta1Projection `json:"projection,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1DatastoreOptions) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1DatastoreOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1Dictionary: Custom information type based on a +// dictionary of words or phrases. This can +// be used to match sensitive information specific to the data, such as +// a list +// of employee IDs or job titles. +// +// Dictionary words are case-insensitive and all characters other than +// letters +// and digits in the unicode [Basic +// Multilingual +// Plane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29# +// Basic_Multilingual_Plane) +// will be replaced with whitespace when scanning for matches, so +// the +// dictionary phrase "Sam Johnson" will match all three phrases "sam +// johnson", +// "Sam, Johnson", and "Sam (Johnson)". Additionally, the +// characters +// surrounding any match must be of a different type than the +// adjacent +// characters within the word, so letters must be next to non-letters +// and +// digits next to non-digits. 
For example, the dictionary word "jen" +// will +// match the first three letters of the text "jen123" but will return +// no +// matches for "jennifer". +// +// Dictionary words containing a large number of characters that are +// not +// letters or digits may result in unexpected findings because such +// characters +// are treated as whitespace. +type GooglePrivacyDlpV2beta1Dictionary struct { + // WordList: List of words or phrases to search for. + WordList *GooglePrivacyDlpV2beta1WordList `json:"wordList,omitempty"` + + // ForceSendFields is a list of field names (e.g. "WordList") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "WordList") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1Dictionary) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1Dictionary + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1EntityId: An entity in a dataset is a field or +// set of fields that correspond to a +// single person. For example, in medical records the `EntityId` might +// be +// a patient identifier, or for financial records it might be an +// account +// identifier. 
This message is used when generalizations or analysis +// must be +// consistent across multiple rows pertaining to the same entity. +type GooglePrivacyDlpV2beta1EntityId struct { + // Field: Composite key indicating which field contains the entity + // identifier. + Field *GooglePrivacyDlpV2beta1FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1EntityId) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1EntityId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1FieldId: General identifier of a data field in +// a storage service. +type GooglePrivacyDlpV2beta1FieldId struct { + // ColumnName: Name describing the field. + ColumnName string `json:"columnName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ColumnName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ColumnName") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1FieldId) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1FieldId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1FileSet: Set of files to scan. +type GooglePrivacyDlpV2beta1FileSet struct { + // Url: The url, in the format `gs:///`. Trailing wildcard + // in the + // path is allowed. + Url string `json:"url,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Url") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Url") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1FileSet) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1FileSet + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1InfoType: Type of information detected by the +// API. +type GooglePrivacyDlpV2beta1InfoType struct { + // Name: Name of the information type. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1InfoType) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1InfoType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1InfoTypeLimit: Max findings configuration per +// info type, per content item or long running +// operation. +type GooglePrivacyDlpV2beta1InfoTypeLimit struct { + // InfoType: Type of information the findings limit applies to. Only one + // limit per + // info_type should be provided. 
If InfoTypeLimit does not have + // an + // info_type, the DLP API applies the limit against all info_types that + // are + // found but not specified in another InfoTypeLimit. + InfoType *GooglePrivacyDlpV2beta1InfoType `json:"infoType,omitempty"` + + // MaxFindings: Max findings limit for the given infoType. + MaxFindings int64 `json:"maxFindings,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InfoType") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InfoType") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1InfoTypeLimit) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1InfoTypeLimit + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1InfoTypeStatistics: Statistics regarding a +// specific InfoType. +type GooglePrivacyDlpV2beta1InfoTypeStatistics struct { + // Count: Number of findings for this info type. + Count int64 `json:"count,omitempty,string"` + + // InfoType: The type of finding this stat is for. + InfoType *GooglePrivacyDlpV2beta1InfoType `json:"infoType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. 
By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1InfoTypeStatistics) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1InfoTypeStatistics + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1InspectConfig: Configuration description of +// the scanning process. +// When used with redactContent only info_types and min_likelihood are +// currently +// used. +type GooglePrivacyDlpV2beta1InspectConfig struct { + // CustomInfoTypes: Custom info types provided by the user. + CustomInfoTypes []*GooglePrivacyDlpV2beta1CustomInfoType `json:"customInfoTypes,omitempty"` + + // ExcludeTypes: When true, excludes type information of the findings. + ExcludeTypes bool `json:"excludeTypes,omitempty"` + + // IncludeQuote: When true, a contextual quote from the data that + // triggered a finding is + // included in the response; see Finding.quote. + IncludeQuote bool `json:"includeQuote,omitempty"` + + // InfoTypeLimits: Configuration of findings limit given for specified + // info types. 
+ InfoTypeLimits []*GooglePrivacyDlpV2beta1InfoTypeLimit `json:"infoTypeLimits,omitempty"` + + // InfoTypes: Restricts what info_types to look for. The values must + // correspond to + // InfoType values returned by ListInfoTypes or found in + // documentation. + // Empty info_types runs all enabled detectors. + InfoTypes []*GooglePrivacyDlpV2beta1InfoType `json:"infoTypes,omitempty"` + + // MaxFindings: Limits the number of findings per content item or long + // running operation. + MaxFindings int64 `json:"maxFindings,omitempty"` + + // MinLikelihood: Only returns findings equal or above this threshold. + // + // Possible values: + // "LIKELIHOOD_UNSPECIFIED" - Default value; information with all + // likelihoods is included. + // "VERY_UNLIKELY" - Few matching elements. + // "UNLIKELY" + // "POSSIBLE" - Some matching elements. + // "LIKELY" + // "VERY_LIKELY" - Many matching elements. + MinLikelihood string `json:"minLikelihood,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomInfoTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomInfoTypes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1InspectConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1InspectConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1InspectOperationMetadata: Metadata returned +// within GetOperation for an inspect request. +type GooglePrivacyDlpV2beta1InspectOperationMetadata struct { + // CreateTime: The time which this request was started. + CreateTime string `json:"createTime,omitempty"` + + InfoTypeStats []*GooglePrivacyDlpV2beta1InfoTypeStatistics `json:"infoTypeStats,omitempty"` + + // ProcessedBytes: Total size in bytes that were processed. + ProcessedBytes int64 `json:"processedBytes,omitempty,string"` + + // RequestInspectConfig: The inspect config used to create the + // Operation. + RequestInspectConfig *GooglePrivacyDlpV2beta1InspectConfig `json:"requestInspectConfig,omitempty"` + + // RequestOutputConfig: Optional location to store findings. + RequestOutputConfig *GooglePrivacyDlpV2beta1OutputStorageConfig `json:"requestOutputConfig,omitempty"` + + // RequestStorageConfig: The storage config used to create the + // Operation. + RequestStorageConfig *GooglePrivacyDlpV2beta1StorageConfig `json:"requestStorageConfig,omitempty"` + + // TotalEstimatedBytes: Estimate of the number of bytes to process. + TotalEstimatedBytes int64 `json:"totalEstimatedBytes,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1InspectOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1InspectOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1InspectOperationResult: The operational data. +type GooglePrivacyDlpV2beta1InspectOperationResult struct { + // Name: The server-assigned name, which is only unique within the same + // service that + // originally returns it. If you use the default HTTP mapping, + // the + // `name` should have the format of `inspect/results/{id}`. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1InspectOperationResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1InspectOperationResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KAnonymityConfig: k-anonymity metric, used for +// analysis of reidentification risk. +type GooglePrivacyDlpV2beta1KAnonymityConfig struct { + // EntityId: Optional message indicating that each distinct entity_id + // should not + // contribute to the k-anonymity count more than once per equivalence + // class. + // If an entity_id appears on several rows with different + // quasi-identifier + // tuples, it will contribute to each count exactly once. + // + // This can lead to unexpected results. Consider a table where ID 1 + // is + // associated to quasi-identifier "foo", ID 2 to "bar", and ID 3 to + // *both* + // quasi-identifiers "foo" and "bar" (on separate rows), and where this + // ID + // is used as entity_id. Then, the anonymity value associated to ID 3 + // will + // be 2, even if it is the only ID to be associated to both values "foo" + // and + // "bar". + EntityId *GooglePrivacyDlpV2beta1EntityId `json:"entityId,omitempty"` + + // QuasiIds: Set of fields to compute k-anonymity over. When multiple + // fields are + // specified, they are considered a single composite key. Structs + // and + // repeated data types are not supported; however, nested fields + // are + // supported so long as they are not structs themselves or nested + // within + // a repeated field. + QuasiIds []*GooglePrivacyDlpV2beta1FieldId `json:"quasiIds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EntityId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EntityId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KAnonymityConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KAnonymityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass: The set of +// columns' values that share the same k-anonymity value. +type GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass struct { + // EquivalenceClassSize: Size of the equivalence class, for example + // number of rows with the + // above set of values. + EquivalenceClassSize int64 `json:"equivalenceClassSize,omitempty,string"` + + // QuasiIdsValues: Set of values defining the equivalence class. One + // value per + // quasi-identifier column in the original KAnonymity metric + // message. + // The order is always the same as the original request. + QuasiIdsValues []*GooglePrivacyDlpV2beta1Value `json:"quasiIdsValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EquivalenceClassSize") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. 
+ // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EquivalenceClassSize") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KAnonymityHistogramBucket: Histogram bucket of +// equivalence class sizes in the table. +type GooglePrivacyDlpV2beta1KAnonymityHistogramBucket struct { + // BucketSize: Total number of records in this bucket. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValues: Sample of equivalence classes in this bucket. The total + // number of + // classes returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2beta1KAnonymityEquivalenceClass `json:"bucketValues,omitempty"` + + // EquivalenceClassSizeLowerBound: Lower bound on the size of the + // equivalence classes in this bucket. + EquivalenceClassSizeLowerBound int64 `json:"equivalenceClassSizeLowerBound,omitempty,string"` + + // EquivalenceClassSizeUpperBound: Upper bound on the size of the + // equivalence classes in this bucket. 
+ EquivalenceClassSizeUpperBound int64 `json:"equivalenceClassSizeUpperBound,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KAnonymityHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KAnonymityHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KAnonymityResult: Result of the k-anonymity +// computation. +type GooglePrivacyDlpV2beta1KAnonymityResult struct { + // EquivalenceClassHistogramBuckets: Histogram of k-anonymity + // equivalence classes. + EquivalenceClassHistogramBuckets []*GooglePrivacyDlpV2beta1KAnonymityHistogramBucket `json:"equivalenceClassHistogramBuckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EquivalenceClassHistogramBuckets") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. 
However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "EquivalenceClassHistogramBuckets") to include in API requests with + // the JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KAnonymityResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KAnonymityResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KMapEstimationConfig: Reidentifiability +// metric. This corresponds to a risk model similar to what +// is called "journalist risk" in the literature, except the attack +// dataset is +// statistically modeled instead of being perfectly known. This can be +// done +// using publicly available data (like the US Census), or using a +// custom +// statistical model (indicated as one or several BigQuery tables), or +// by +// extrapolating from the distribution of values in the input dataset. +type GooglePrivacyDlpV2beta1KMapEstimationConfig struct { + // AuxiliaryTables: Several auxiliary tables can be used in the + // analysis. Each custom_tag + // used to tag a quasi-identifiers column must appear in exactly one + // column + // of one auxiliary table. + AuxiliaryTables []*GooglePrivacyDlpV2beta1AuxiliaryTable `json:"auxiliaryTables,omitempty"` + + // QuasiIds: Fields considered to be quasi-identifiers. No two columns + // can have the + // same tag. 
[required] + QuasiIds []*GooglePrivacyDlpV2beta1TaggedField `json:"quasiIds,omitempty"` + + // RegionCode: ISO 3166-1 alpha-2 region code to use in the statistical + // modeling. + // Required if no column is tagged with a region-specific InfoType + // (like + // US_ZIP_5) or a region code. + RegionCode string `json:"regionCode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AuxiliaryTables") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AuxiliaryTables") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KMapEstimationConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KMapEstimationConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket: A +// KMapEstimationHistogramBucket message with the following values: +// min_anonymity: 3 +// max_anonymity: 5 +// frequency: 42 +// means that there are 42 records whose quasi-identifier values +// correspond +// to 3, 4 or 5 people in the overlying population. 
An important +// particular +// case is when min_anonymity = max_anonymity = 1: the frequency field +// then +// corresponds to the number of uniquely identifiable records. +type GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket struct { + // BucketSize: Number of records within these anonymity bounds. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValues: Sample of quasi-identifier tuple values in this bucket. + // The total + // number of classes returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues `json:"bucketValues,omitempty"` + + // MaxAnonymity: Always greater than or equal to min_anonymity. + MaxAnonymity int64 `json:"maxAnonymity,omitempty,string"` + + // MinAnonymity: Always positive. + MinAnonymity int64 `json:"minAnonymity,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues: A tuple of values +// for the quasi-identifier columns. +type GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues struct { + // EstimatedAnonymity: The estimated anonymity for these + // quasi-identifier values. + EstimatedAnonymity int64 `json:"estimatedAnonymity,omitempty,string"` + + // QuasiIdsValues: The quasi-identifier values. + QuasiIdsValues []*GooglePrivacyDlpV2beta1Value `json:"quasiIdsValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EstimatedAnonymity") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EstimatedAnonymity") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KMapEstimationQuasiIdValues + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KMapEstimationResult: Result of the +// reidentifiability analysis. Note that these results are +// an +// estimation, not exact values. +type GooglePrivacyDlpV2beta1KMapEstimationResult struct { + // KMapEstimationHistogram: The intervals [min_anonymity, max_anonymity] + // do not overlap. If a value + // doesn't correspond to any such interval, the associated frequency + // is + // zero. For example, the following records: + // {min_anonymity: 1, max_anonymity: 1, frequency: 17} + // {min_anonymity: 2, max_anonymity: 3, frequency: 42} + // {min_anonymity: 5, max_anonymity: 10, frequency: 99} + // mean that there are no record with an estimated anonymity of 4, 5, + // or + // larger than 10. + KMapEstimationHistogram []*GooglePrivacyDlpV2beta1KMapEstimationHistogramBucket `json:"kMapEstimationHistogram,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "KMapEstimationHistogram") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "KMapEstimationHistogram") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KMapEstimationResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KMapEstimationResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1KindExpression: A representation of a +// Datastore kind. +type GooglePrivacyDlpV2beta1KindExpression struct { + // Name: The name of the kind. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1KindExpression) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1KindExpression + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1LDiversityConfig: l-diversity metric, used for +// analysis of reidentification risk. 
+type GooglePrivacyDlpV2beta1LDiversityConfig struct { + // QuasiIds: Set of quasi-identifiers indicating how equivalence classes + // are + // defined for the l-diversity computation. When multiple fields + // are + // specified, they are considered a single composite key. + QuasiIds []*GooglePrivacyDlpV2beta1FieldId `json:"quasiIds,omitempty"` + + // SensitiveAttribute: Sensitive field for computing the l-value. + SensitiveAttribute *GooglePrivacyDlpV2beta1FieldId `json:"sensitiveAttribute,omitempty"` + + // ForceSendFields is a list of field names (e.g. "QuasiIds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "QuasiIds") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1LDiversityConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1LDiversityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1LDiversityEquivalenceClass: The set of +// columns' values that share the same l-diversity value. +type GooglePrivacyDlpV2beta1LDiversityEquivalenceClass struct { + // EquivalenceClassSize: Size of the k-anonymity equivalence class. 
+ EquivalenceClassSize int64 `json:"equivalenceClassSize,omitempty,string"` + + // NumDistinctSensitiveValues: Number of distinct sensitive values in + // this equivalence class. + NumDistinctSensitiveValues int64 `json:"numDistinctSensitiveValues,omitempty,string"` + + // QuasiIdsValues: Quasi-identifier values defining the k-anonymity + // equivalence + // class. The order is always the same as the original request. + QuasiIdsValues []*GooglePrivacyDlpV2beta1Value `json:"quasiIdsValues,omitempty"` + + // TopSensitiveValues: Estimated frequencies of top sensitive values. + TopSensitiveValues []*GooglePrivacyDlpV2beta1ValueFrequency `json:"topSensitiveValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EquivalenceClassSize") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EquivalenceClassSize") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1LDiversityEquivalenceClass) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1LDiversityEquivalenceClass + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1LDiversityHistogramBucket: Histogram bucket of +// sensitive value frequencies in the table. +type GooglePrivacyDlpV2beta1LDiversityHistogramBucket struct { + // BucketSize: Total number of records in this bucket. + BucketSize int64 `json:"bucketSize,omitempty,string"` + + // BucketValues: Sample of equivalence classes in this bucket. The total + // number of + // classes returned per bucket is capped at 20. + BucketValues []*GooglePrivacyDlpV2beta1LDiversityEquivalenceClass `json:"bucketValues,omitempty"` + + // SensitiveValueFrequencyLowerBound: Lower bound on the sensitive value + // frequencies of the equivalence + // classes in this bucket. + SensitiveValueFrequencyLowerBound int64 `json:"sensitiveValueFrequencyLowerBound,omitempty,string"` + + // SensitiveValueFrequencyUpperBound: Upper bound on the sensitive value + // frequencies of the equivalence + // classes in this bucket. + SensitiveValueFrequencyUpperBound int64 `json:"sensitiveValueFrequencyUpperBound,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "BucketSize") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketSize") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1LDiversityHistogramBucket) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1LDiversityHistogramBucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1LDiversityResult: Result of the l-diversity +// computation. +type GooglePrivacyDlpV2beta1LDiversityResult struct { + // SensitiveValueFrequencyHistogramBuckets: Histogram of l-diversity + // equivalence class sensitive value frequencies. + SensitiveValueFrequencyHistogramBuckets []*GooglePrivacyDlpV2beta1LDiversityHistogramBucket `json:"sensitiveValueFrequencyHistogramBuckets,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SensitiveValueFrequencyHistogramBuckets") to unconditionally include + // in API requests. By default, fields with empty values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "SensitiveValueFrequencyHistogramBuckets") to include in API requests + // with the JSON null value. By default, fields with empty values are + // omitted from API requests. However, any field with an empty value + // appearing in NullFields will be sent to the server as null. It is an + // error if a field in this list has a non-empty value. This may be used + // to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1LDiversityResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1LDiversityResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1NumericalStatsConfig: Compute numerical stats +// over an individual column, including +// min, max, and quantiles. +type GooglePrivacyDlpV2beta1NumericalStatsConfig struct { + // Field: Field to compute numerical stats on. Supported types + // are + // integer, float, date, datetime, timestamp, time. + Field *GooglePrivacyDlpV2beta1FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Field") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Field") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1NumericalStatsConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1NumericalStatsConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1NumericalStatsResult: Result of the numerical +// stats computation. 
+type GooglePrivacyDlpV2beta1NumericalStatsResult struct { + // MaxValue: Maximum value appearing in the column. + MaxValue *GooglePrivacyDlpV2beta1Value `json:"maxValue,omitempty"` + + // MinValue: Minimum value appearing in the column. + MinValue *GooglePrivacyDlpV2beta1Value `json:"minValue,omitempty"` + + // QuantileValues: List of 99 values that partition the set of field + // values into 100 equal + // sized buckets. + QuantileValues []*GooglePrivacyDlpV2beta1Value `json:"quantileValues,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1NumericalStatsResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1NumericalStatsResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1OutputStorageConfig: Cloud repository for +// storing output. +type GooglePrivacyDlpV2beta1OutputStorageConfig struct { + // StoragePath: The path to a Google Cloud Storage location to store + // output. 
+ // The bucket must already exist and + // the Google APIs service account for DLP must have write permission + // to + // write to the given bucket. + // Results are split over multiple csv files with each file name + // matching + // the pattern "[operation_id]_[count].csv", for + // example + // `3094877188788974909_1.csv`. The `operation_id` matches + // the + // identifier for the Operation, and the `count` is a counter used + // for + // tracking the number of files written. + // + // The CSV file(s) contain the following columns regardless of storage + // type + // scanned: + // - id + // - info_type + // - likelihood + // - byte size of finding + // - quote + // - timestamp + // + // For Cloud Storage the next columns are: + // + // - file_path + // - start_offset + // + // For Cloud Datastore the next columns are: + // + // - project_id + // - namespace_id + // - path + // - column_name + // - offset + // + // For BigQuery the next columns are: + // + // - row_number + // - project_id + // - dataset_id + // - table_id + StoragePath *GooglePrivacyDlpV2beta1CloudStoragePath `json:"storagePath,omitempty"` + + // Table: Store findings in a new table in the dataset. + Table *GooglePrivacyDlpV2beta1BigQueryTable `json:"table,omitempty"` + + // ForceSendFields is a list of field names (e.g. "StoragePath") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StoragePath") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. 
However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1OutputStorageConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1OutputStorageConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1PartitionId: Datastore partition ID. +// A partition ID identifies a grouping of entities. The grouping is +// always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +type GooglePrivacyDlpV2beta1PartitionId struct { + // NamespaceId: If not empty, the ID of the namespace to which the + // entities belong. + NamespaceId string `json:"namespaceId,omitempty"` + + // ProjectId: The ID of the project to which the entities belong. + ProjectId string `json:"projectId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NamespaceId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NamespaceId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1PartitionId) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1PartitionId + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1PrivacyMetric: Privacy metric to compute for +// reidentification risk analysis. +type GooglePrivacyDlpV2beta1PrivacyMetric struct { + CategoricalStatsConfig *GooglePrivacyDlpV2beta1CategoricalStatsConfig `json:"categoricalStatsConfig,omitempty"` + + KAnonymityConfig *GooglePrivacyDlpV2beta1KAnonymityConfig `json:"kAnonymityConfig,omitempty"` + + KMapEstimationConfig *GooglePrivacyDlpV2beta1KMapEstimationConfig `json:"kMapEstimationConfig,omitempty"` + + LDiversityConfig *GooglePrivacyDlpV2beta1LDiversityConfig `json:"lDiversityConfig,omitempty"` + + NumericalStatsConfig *GooglePrivacyDlpV2beta1NumericalStatsConfig `json:"numericalStatsConfig,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "CategoricalStatsConfig") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CategoricalStatsConfig") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1PrivacyMetric) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1PrivacyMetric + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1Projection: A representation of a Datastore +// property in a projection. +type GooglePrivacyDlpV2beta1Projection struct { + // Property: The property to project. + Property *GooglePrivacyDlpV2beta1PropertyReference `json:"property,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Property") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Property") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1Projection) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1Projection + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1PropertyReference: A reference to a property +// relative to the Datastore kind expressions. +type GooglePrivacyDlpV2beta1PropertyReference struct { + // Name: The name of the property. + // If name includes "."s, it may be interpreted as a property name path. 
+ Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1PropertyReference) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1PropertyReference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1QuasiIdField: A quasi-identifier column has a +// custom_tag, used to know which column +// in the data corresponds to which column in the statistical model. +type GooglePrivacyDlpV2beta1QuasiIdField struct { + CustomTag string `json:"customTag,omitempty"` + + Field *GooglePrivacyDlpV2beta1FieldId `json:"field,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomTag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomTag") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1QuasiIdField) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1QuasiIdField + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1RiskAnalysisOperationMetadata: Metadata +// returned within +// the +// [`riskAnalysis.operations.get`](/dlp/docs/reference/rest/v2beta1/r +// iskAnalysis.operations/get) +// for risk analysis. +type GooglePrivacyDlpV2beta1RiskAnalysisOperationMetadata struct { + // CreateTime: The time which this request was started. + CreateTime string `json:"createTime,omitempty"` + + // RequestedPrivacyMetric: Privacy metric to compute. + RequestedPrivacyMetric *GooglePrivacyDlpV2beta1PrivacyMetric `json:"requestedPrivacyMetric,omitempty"` + + // RequestedSourceTable: Input dataset to compute metrics over. + RequestedSourceTable *GooglePrivacyDlpV2beta1BigQueryTable `json:"requestedSourceTable,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1RiskAnalysisOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1RiskAnalysisOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1RiskAnalysisOperationResult: Result of a risk +// analysis +// [`Operation`](/dlp/docs/reference/rest/v2beta1/inspect.operat +// ions) +// request. +type GooglePrivacyDlpV2beta1RiskAnalysisOperationResult struct { + CategoricalStatsResult *GooglePrivacyDlpV2beta1CategoricalStatsResult `json:"categoricalStatsResult,omitempty"` + + KAnonymityResult *GooglePrivacyDlpV2beta1KAnonymityResult `json:"kAnonymityResult,omitempty"` + + KMapEstimationResult *GooglePrivacyDlpV2beta1KMapEstimationResult `json:"kMapEstimationResult,omitempty"` + + LDiversityResult *GooglePrivacyDlpV2beta1LDiversityResult `json:"lDiversityResult,omitempty"` + + NumericalStatsResult *GooglePrivacyDlpV2beta1NumericalStatsResult `json:"numericalStatsResult,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "CategoricalStatsResult") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"CategoricalStatsResult") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1RiskAnalysisOperationResult) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1RiskAnalysisOperationResult + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1StorageConfig: Shared message indicating Cloud +// storage type. +type GooglePrivacyDlpV2beta1StorageConfig struct { + // BigQueryOptions: BigQuery options specification. + BigQueryOptions *GooglePrivacyDlpV2beta1BigQueryOptions `json:"bigQueryOptions,omitempty"` + + // CloudStorageOptions: Google Cloud Storage options specification. + CloudStorageOptions *GooglePrivacyDlpV2beta1CloudStorageOptions `json:"cloudStorageOptions,omitempty"` + + // DatastoreOptions: Google Cloud Datastore options specification. + DatastoreOptions *GooglePrivacyDlpV2beta1DatastoreOptions `json:"datastoreOptions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BigQueryOptions") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BigQueryOptions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1StorageConfig) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1StorageConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1SurrogateType: Message for detecting output +// from deidentification transformations +// such +// as +// [`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2beta1/cont +// ent/deidentify#CryptoReplaceFfxFpeConfig). +// These types of transformations are +// those that perform pseudonymization, thereby producing a "surrogate" +// as +// output. This should be used in conjunction with a field on +// the +// transformation such as `surrogate_info_type`. This custom info type +// does +// not support the use of `detection_rules`. +type GooglePrivacyDlpV2beta1SurrogateType struct { +} + +// GooglePrivacyDlpV2beta1TaggedField: A column with a semantic tag +// attached. +type GooglePrivacyDlpV2beta1TaggedField struct { + // CustomTag: A column can be tagged with a custom tag. In this case, + // the user must + // indicate an auxiliary table that contains statistical information + // on + // the possible values of this column (below). + CustomTag string `json:"customTag,omitempty"` + + // Field: Identifies the column. [required] + Field *GooglePrivacyDlpV2beta1FieldId `json:"field,omitempty"` + + // Inferred: If no semantic tag is indicated, we infer the statistical + // model from + // the distribution of values in the input data + Inferred *GoogleProtobufEmpty `json:"inferred,omitempty"` + + // InfoType: A column can be tagged with a InfoType to use the relevant + // public + // dataset as a statistical model of population, if available. 
+ // We + // currently support US ZIP codes, region codes, ages and genders. + InfoType *GooglePrivacyDlpV2beta1InfoType `json:"infoType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CustomTag") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomTag") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1TaggedField) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1TaggedField + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1Value: Set of primitive values supported by +// the system. +// Note that for the purposes of inspection or transformation, the +// number +// of bytes considered to comprise a 'Value' is based on its +// representation +// as a UTF-8 encoded string. For example, if 'integer_value' is set +// to +// 123456789, the number of bytes would be counted as 9, even though +// an +// int64 only holds up to 8 bytes of data. 
+type GooglePrivacyDlpV2beta1Value struct { + BooleanValue bool `json:"booleanValue,omitempty"` + + DateValue *GoogleTypeDate `json:"dateValue,omitempty"` + + FloatValue float64 `json:"floatValue,omitempty"` + + IntegerValue int64 `json:"integerValue,omitempty,string"` + + StringValue string `json:"stringValue,omitempty"` + + TimeValue *GoogleTypeTimeOfDay `json:"timeValue,omitempty"` + + TimestampValue string `json:"timestampValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BooleanValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BooleanValue") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1Value) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1Value + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *GooglePrivacyDlpV2beta1Value) UnmarshalJSON(data []byte) error { + type NoMethod GooglePrivacyDlpV2beta1Value + var s1 struct { + FloatValue gensupport.JSONFloat64 `json:"floatValue"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.FloatValue = float64(s1.FloatValue) + return nil +} + +// GooglePrivacyDlpV2beta1ValueFrequency: A value of a field, including +// its frequency. +type GooglePrivacyDlpV2beta1ValueFrequency struct { + // Count: How many times the value is contained in the field. + Count int64 `json:"count,omitempty,string"` + + // Value: A value contained in the field in question. + Value *GooglePrivacyDlpV2beta1Value `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Count") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Count") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1ValueFrequency) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1ValueFrequency + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GooglePrivacyDlpV2beta1WordList: Message defining a list of words or +// phrases to search for in the data. +type GooglePrivacyDlpV2beta1WordList struct { + // Words: Words or phrases defining the dictionary. The dictionary must + // contain + // at least one phrase and every phrase must contain at least 2 + // characters + // that are letters or digits. [required] + Words []string `json:"words,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Words") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Words") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GooglePrivacyDlpV2beta1WordList) MarshalJSON() ([]byte, error) { + type NoMethod GooglePrivacyDlpV2beta1WordList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleProtobufEmpty: A generic empty message that you can re-use to +// avoid defining duplicated +// empty messages in your APIs. 
A typical example is to use it as the +// request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns +// (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type GoogleProtobufEmpty struct { + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` +} + +// GoogleRpcStatus: The `Status` type defines a logical error model that +// is suitable for different +// programming environments, including REST APIs and RPC APIs. It is +// used by +// [gRPC](https://github.com/grpc). The error model is designed to +// be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, +// and error details. The error code should be an enum value +// of +// google.rpc.Code, but it may accept additional error codes if needed. +// The +// error message should be a developer-facing English message that +// helps +// developers *understand* and *resolve* the error. If a localized +// user-facing +// error message is needed, put the localized message in the error +// details or +// localize it in the client. The optional error details may contain +// arbitrary +// information about the error. There is a predefined set of error +// detail types +// in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error +// model, but it +// is not necessarily the actual wire format. When the `Status` message +// is +// exposed in different client libraries and different wire protocols, +// it can be +// mapped differently. For example, it will likely be mapped to some +// exceptions +// in Java, but more likely mapped to some error codes in C. 
+// +// # Other uses +// +// The error model and the `Status` message can be used in a variety +// of +// environments, either with or without APIs, to provide a +// consistent developer experience across different +// environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the +// client, +// it may embed the `Status` in the normal response to indicate the +// partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step +// may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch +// response, the +// `Status` message should be used directly inside batch response, +// one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous +// operation +// results in its response, the status of those operations should +// be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message +// `Status` could +// be used directly after any stripping needed for security/privacy +// reasons. +type GoogleRpcStatus struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of + // message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any + // user-facing error message should be localized and sent in + // the + // google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { + type NoMethod GoogleRpcStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleTypeDate: Represents a whole calendar date, e.g. date of birth. +// The time of day and +// time zone are either specified elsewhere or are not significant. The +// date +// is relative to the Proleptic Gregorian Calendar. The day may be 0 +// to +// represent a year and month where the day is not significant, e.g. +// credit card +// expiration date. The year may be 0 to represent a month and day +// independent +// of year, e.g. anniversary date. Related types are +// google.type.TimeOfDay +// and `google.protobuf.Timestamp`. +type GoogleTypeDate struct { + // Day: Day of month. Must be from 1 to 31 and valid for the year and + // month, or 0 + // if specifying a year/month where the day is not significant. + Day int64 `json:"day,omitempty"` + + // Month: Month of year. Must be from 1 to 12. + Month int64 `json:"month,omitempty"` + + // Year: Year of date. Must be from 1 to 9999, or 0 if specifying a date + // without + // a year. + Year int64 `json:"year,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Day") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Day") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleTypeDate) MarshalJSON() ([]byte, error) { + type NoMethod GoogleTypeDate + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleTypeTimeOfDay: Represents a time of day. The date and time zone +// are either not significant +// or are specified elsewhere. An API may choose to allow leap seconds. +// Related +// types are google.type.Date and `google.protobuf.Timestamp`. +type GoogleTypeTimeOfDay struct { + // Hours: Hours of day in 24 hour format. Should be from 0 to 23. An API + // may choose + // to allow the value "24:00:00" for scenarios like business closing + // time. + Hours int64 `json:"hours,omitempty"` + + // Minutes: Minutes of hour of day. Must be from 0 to 59. + Minutes int64 `json:"minutes,omitempty"` + + // Nanos: Fractions of seconds in nanoseconds. Must be from 0 to + // 999,999,999. + Nanos int64 `json:"nanos,omitempty"` + + // Seconds: Seconds of minutes of the time. Must normally be from 0 to + // 59. An API may + // allow the value 60 if it allows leap-seconds. 
+ Seconds int64 `json:"seconds,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Hours") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Hours") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleTypeTimeOfDay) MarshalJSON() ([]byte, error) { + type NoMethod GoogleTypeTimeOfDay + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "dlp.infoTypes.list": + +type InfoTypesListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Returns sensitive information types DLP supports. +func (r *InfoTypesService) List() *InfoTypesListCall { + c := &InfoTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + return c +} + +// Filter sets the optional parameter "filter": Optional filter to only +// return infoTypes supported by certain parts of the +// API. Defaults to supported_by=INSPECT. +func (c *InfoTypesListCall) Filter(filter string) *InfoTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// LanguageCode sets the optional parameter "languageCode": Optional +// BCP-47 language code for localized infoType friendly +// names. 
If omitted, or if localized strings are not available, +// en-US strings will be returned. +func (c *InfoTypesListCall) LanguageCode(languageCode string) *InfoTypesListCall { + c.urlParams_.Set("languageCode", languageCode) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InfoTypesListCall) Fields(s ...googleapi.Field) *InfoTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InfoTypesListCall) IfNoneMatch(entityTag string) *InfoTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InfoTypesListCall) Context(ctx context.Context) *InfoTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InfoTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InfoTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/infoTypes") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.infoTypes.list" call. +// Exactly one of *GooglePrivacyDlpV2ListInfoTypesResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GooglePrivacyDlpV2ListInfoTypesResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InfoTypesListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListInfoTypesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListInfoTypesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns sensitive information types DLP supports.", + // "flatPath": "v2/infoTypes", + // "httpMethod": "GET", + // "id": "dlp.infoTypes.list", + // "parameterOrder": [], + // "parameters": { + // "filter": { + // "description": "Optional filter to only return infoTypes supported by certain parts of the\nAPI. 
Defaults to supported_by=INSPECT.", + // "location": "query", + // "type": "string" + // }, + // "languageCode": { + // "description": "Optional BCP-47 language code for localized infoType friendly\nnames. If omitted, or if localized strings are not available,\nen-US strings will be returned.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/infoTypes", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListInfoTypesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.deidentifyTemplates.create": + +type OrganizationsDeidentifyTemplatesCreateCall struct { + s *Service + parent string + googleprivacydlpv2createdeidentifytemplaterequest *GooglePrivacyDlpV2CreateDeidentifyTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a de-identify template for re-using frequently used +// configuration +// for Deidentifying content, images, and storage. +func (r *OrganizationsDeidentifyTemplatesService) Create(parent string, googleprivacydlpv2createdeidentifytemplaterequest *GooglePrivacyDlpV2CreateDeidentifyTemplateRequest) *OrganizationsDeidentifyTemplatesCreateCall { + c := &OrganizationsDeidentifyTemplatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2createdeidentifytemplaterequest = googleprivacydlpv2createdeidentifytemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsDeidentifyTemplatesCreateCall) Fields(s ...googleapi.Field) *OrganizationsDeidentifyTemplatesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsDeidentifyTemplatesCreateCall) Context(ctx context.Context) *OrganizationsDeidentifyTemplatesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsDeidentifyTemplatesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsDeidentifyTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2createdeidentifytemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/deidentifyTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.deidentifyTemplates.create" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2DeidentifyTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. 
+func (c *OrganizationsDeidentifyTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a de-identify template for re-using frequently used configuration\nfor Deidentifying content, images, and storage.", + // "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates", + // "httpMethod": "POST", + // "id": "dlp.organizations.deidentifyTemplates.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/deidentifyTemplates", + // "request": { + // "$ref": "GooglePrivacyDlpV2CreateDeidentifyTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.deidentifyTemplates.delete": + +type OrganizationsDeidentifyTemplatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + 
header_ http.Header +} + +// Delete: Deletes a de-identify template. +func (r *OrganizationsDeidentifyTemplatesService) Delete(name string) *OrganizationsDeidentifyTemplatesDeleteCall { + c := &OrganizationsDeidentifyTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsDeidentifyTemplatesDeleteCall) Fields(s ...googleapi.Field) *OrganizationsDeidentifyTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsDeidentifyTemplatesDeleteCall) Context(ctx context.Context) *OrganizationsDeidentifyTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsDeidentifyTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsDeidentifyTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.deidentifyTemplates.delete" call. 
+// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsDeidentifyTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a de-identify template.", + // "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates/{deidentifyTemplatesId}", + // "httpMethod": "DELETE", + // "id": "dlp.organizations.deidentifyTemplates.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and deidentify template to be deleted,\nfor example `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + // "location": "path", + // "pattern": "^organizations/[^/]+/deidentifyTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ 
+ // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.deidentifyTemplates.get": + +type OrganizationsDeidentifyTemplatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a de-identify template. +func (r *OrganizationsDeidentifyTemplatesService) Get(name string) *OrganizationsDeidentifyTemplatesGetCall { + c := &OrganizationsDeidentifyTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsDeidentifyTemplatesGetCall) Fields(s ...googleapi.Field) *OrganizationsDeidentifyTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsDeidentifyTemplatesGetCall) IfNoneMatch(entityTag string) *OrganizationsDeidentifyTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsDeidentifyTemplatesGetCall) Context(ctx context.Context) *OrganizationsDeidentifyTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsDeidentifyTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsDeidentifyTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.deidentifyTemplates.get" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2DeidentifyTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsDeidentifyTemplatesGetCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a de-identify template.", + // "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates/{deidentifyTemplatesId}", + // "httpMethod": "GET", + // "id": "dlp.organizations.deidentifyTemplates.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and deidentify template to be read, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + // "location": "path", + // "pattern": "^organizations/[^/]+/deidentifyTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.deidentifyTemplates.list": + +type OrganizationsDeidentifyTemplatesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists de-identify templates. 
+func (r *OrganizationsDeidentifyTemplatesService) List(parent string) *OrganizationsDeidentifyTemplatesListCall { + c := &OrganizationsDeidentifyTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Optional size of the +// page, can be limited by server. If zero server returns +// a page of max size 100. +func (c *OrganizationsDeidentifyTemplatesListCall) PageSize(pageSize int64) *OrganizationsDeidentifyTemplatesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional page +// token to continue retrieval. Comes from previous call +// to `ListDeidentifyTemplates`. +func (c *OrganizationsDeidentifyTemplatesListCall) PageToken(pageToken string) *OrganizationsDeidentifyTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsDeidentifyTemplatesListCall) Fields(s ...googleapi.Field) *OrganizationsDeidentifyTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OrganizationsDeidentifyTemplatesListCall) IfNoneMatch(entityTag string) *OrganizationsDeidentifyTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *OrganizationsDeidentifyTemplatesListCall) Context(ctx context.Context) *OrganizationsDeidentifyTemplatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsDeidentifyTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsDeidentifyTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/deidentifyTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.deidentifyTemplates.list" call. +// Exactly one of *GooglePrivacyDlpV2ListDeidentifyTemplatesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2ListDeidentifyTemplatesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsDeidentifyTemplatesListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListDeidentifyTemplatesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListDeidentifyTemplatesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists de-identify templates.", + // "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates", + // "httpMethod": "GET", + // "id": "dlp.organizations.deidentifyTemplates.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional page token to continue retrieval. Comes from previous call\nto `ListDeidentifyTemplates`.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/deidentifyTemplates", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListDeidentifyTemplatesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsDeidentifyTemplatesListCall) Pages(ctx context.Context, f func(*GooglePrivacyDlpV2ListDeidentifyTemplatesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dlp.organizations.deidentifyTemplates.patch": + +type OrganizationsDeidentifyTemplatesPatchCall struct { + s *Service + name string + googleprivacydlpv2updatedeidentifytemplaterequest *GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the de-identify template. +func (r *OrganizationsDeidentifyTemplatesService) Patch(name string, googleprivacydlpv2updatedeidentifytemplaterequest *GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest) *OrganizationsDeidentifyTemplatesPatchCall { + c := &OrganizationsDeidentifyTemplatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.googleprivacydlpv2updatedeidentifytemplaterequest = googleprivacydlpv2updatedeidentifytemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsDeidentifyTemplatesPatchCall) Fields(s ...googleapi.Field) *OrganizationsDeidentifyTemplatesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *OrganizationsDeidentifyTemplatesPatchCall) Context(ctx context.Context) *OrganizationsDeidentifyTemplatesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsDeidentifyTemplatesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsDeidentifyTemplatesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2updatedeidentifytemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.deidentifyTemplates.patch" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2DeidentifyTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsDeidentifyTemplatesPatchCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the de-identify template.", + // "flatPath": "v2/organizations/{organizationsId}/deidentifyTemplates/{deidentifyTemplatesId}", + // "httpMethod": "PATCH", + // "id": "dlp.organizations.deidentifyTemplates.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of organization and deidentify template to be updated, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + // "location": "path", + // "pattern": "^organizations/[^/]+/deidentifyTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.inspectTemplates.create": + +type OrganizationsInspectTemplatesCreateCall struct { + s *Service + parent string + googleprivacydlpv2createinspecttemplaterequest *GooglePrivacyDlpV2CreateInspectTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an 
inspect template for re-using frequently used +// configuration +// for inspecting content, images, and storage. +func (r *OrganizationsInspectTemplatesService) Create(parent string, googleprivacydlpv2createinspecttemplaterequest *GooglePrivacyDlpV2CreateInspectTemplateRequest) *OrganizationsInspectTemplatesCreateCall { + c := &OrganizationsInspectTemplatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2createinspecttemplaterequest = googleprivacydlpv2createinspecttemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsInspectTemplatesCreateCall) Fields(s ...googleapi.Field) *OrganizationsInspectTemplatesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsInspectTemplatesCreateCall) Context(ctx context.Context) *OrganizationsInspectTemplatesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsInspectTemplatesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsInspectTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2createinspecttemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/inspectTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.inspectTemplates.create" call. +// Exactly one of *GooglePrivacyDlpV2InspectTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2InspectTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsInspectTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an inspect template for re-using frequently used configuration\nfor inspecting content, images, and storage.", + // "flatPath": "v2/organizations/{organizationsId}/inspectTemplates", + // "httpMethod": "POST", + // "id": "dlp.organizations.inspectTemplates.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/inspectTemplates", + // "request": { + // "$ref": "GooglePrivacyDlpV2CreateInspectTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.inspectTemplates.delete": + +type OrganizationsInspectTemplatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an inspect template. 
+func (r *OrganizationsInspectTemplatesService) Delete(name string) *OrganizationsInspectTemplatesDeleteCall { + c := &OrganizationsInspectTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsInspectTemplatesDeleteCall) Fields(s ...googleapi.Field) *OrganizationsInspectTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsInspectTemplatesDeleteCall) Context(ctx context.Context) *OrganizationsInspectTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsInspectTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsInspectTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.inspectTemplates.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OrganizationsInspectTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an inspect template.", + // "flatPath": "v2/organizations/{organizationsId}/inspectTemplates/{inspectTemplatesId}", + // "httpMethod": "DELETE", + // "id": "dlp.organizations.inspectTemplates.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and inspectTemplate to be deleted, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + // "location": "path", + // "pattern": "^organizations/[^/]+/inspectTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.inspectTemplates.get": + 
+type OrganizationsInspectTemplatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets an inspect template. +func (r *OrganizationsInspectTemplatesService) Get(name string) *OrganizationsInspectTemplatesGetCall { + c := &OrganizationsInspectTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsInspectTemplatesGetCall) Fields(s ...googleapi.Field) *OrganizationsInspectTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *OrganizationsInspectTemplatesGetCall) IfNoneMatch(entityTag string) *OrganizationsInspectTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OrganizationsInspectTemplatesGetCall) Context(ctx context.Context) *OrganizationsInspectTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OrganizationsInspectTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsInspectTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.inspectTemplates.get" call. +// Exactly one of *GooglePrivacyDlpV2InspectTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2InspectTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsInspectTemplatesGetCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets an inspect template.", + // "flatPath": "v2/organizations/{organizationsId}/inspectTemplates/{inspectTemplatesId}", + // "httpMethod": "GET", + // "id": "dlp.organizations.inspectTemplates.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and inspectTemplate to be read, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + // "location": "path", + // "pattern": "^organizations/[^/]+/inspectTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.organizations.inspectTemplates.list": + +type OrganizationsInspectTemplatesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists inspect templates. 
+func (r *OrganizationsInspectTemplatesService) List(parent string) *OrganizationsInspectTemplatesListCall { + c := &OrganizationsInspectTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Optional size of the +// page, can be limited by server. If zero server returns +// a page of max size 100. +func (c *OrganizationsInspectTemplatesListCall) PageSize(pageSize int64) *OrganizationsInspectTemplatesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional page +// token to continue retrieval. Comes from previous call +// to `ListInspectTemplates`. +func (c *OrganizationsInspectTemplatesListCall) PageToken(pageToken string) *OrganizationsInspectTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsInspectTemplatesListCall) Fields(s ...googleapi.Field) *OrganizationsInspectTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *OrganizationsInspectTemplatesListCall) IfNoneMatch(entityTag string) *OrganizationsInspectTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *OrganizationsInspectTemplatesListCall) Context(ctx context.Context) *OrganizationsInspectTemplatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsInspectTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsInspectTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/inspectTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.inspectTemplates.list" call. +// Exactly one of *GooglePrivacyDlpV2ListInspectTemplatesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2ListInspectTemplatesResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsInspectTemplatesListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListInspectTemplatesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListInspectTemplatesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists inspect templates.", + // "flatPath": "v2/organizations/{organizationsId}/inspectTemplates", + // "httpMethod": "GET", + // "id": "dlp.organizations.inspectTemplates.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional page token to continue retrieval. Comes from previous call\nto `ListInspectTemplates`.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^organizations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/inspectTemplates", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListInspectTemplatesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *OrganizationsInspectTemplatesListCall) Pages(ctx context.Context, f func(*GooglePrivacyDlpV2ListInspectTemplatesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dlp.organizations.inspectTemplates.patch": + +type OrganizationsInspectTemplatesPatchCall struct { + s *Service + name string + googleprivacydlpv2updateinspecttemplaterequest *GooglePrivacyDlpV2UpdateInspectTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the inspect template. +func (r *OrganizationsInspectTemplatesService) Patch(name string, googleprivacydlpv2updateinspecttemplaterequest *GooglePrivacyDlpV2UpdateInspectTemplateRequest) *OrganizationsInspectTemplatesPatchCall { + c := &OrganizationsInspectTemplatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.googleprivacydlpv2updateinspecttemplaterequest = googleprivacydlpv2updateinspecttemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OrganizationsInspectTemplatesPatchCall) Fields(s ...googleapi.Field) *OrganizationsInspectTemplatesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *OrganizationsInspectTemplatesPatchCall) Context(ctx context.Context) *OrganizationsInspectTemplatesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OrganizationsInspectTemplatesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OrganizationsInspectTemplatesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2updateinspecttemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.organizations.inspectTemplates.patch" call. +// Exactly one of *GooglePrivacyDlpV2InspectTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2InspectTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *OrganizationsInspectTemplatesPatchCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the inspect template.", + // "flatPath": "v2/organizations/{organizationsId}/inspectTemplates/{inspectTemplatesId}", + // "httpMethod": "PATCH", + // "id": "dlp.organizations.inspectTemplates.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of organization and inspectTemplate to be updated, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + // "location": "path", + // "pattern": "^organizations/[^/]+/inspectTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GooglePrivacyDlpV2UpdateInspectTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.content.deidentify": + +type ProjectsContentDeidentifyCall struct { + s *Service + parent string + googleprivacydlpv2deidentifycontentrequest *GooglePrivacyDlpV2DeidentifyContentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Deidentify: De-identifies potentially sensitive info from a +// ContentItem. 
+// This method has limits on input size and output size. +// [How-to guide](/dlp/docs/deidentify-sensitive-data) +func (r *ProjectsContentService) Deidentify(parent string, googleprivacydlpv2deidentifycontentrequest *GooglePrivacyDlpV2DeidentifyContentRequest) *ProjectsContentDeidentifyCall { + c := &ProjectsContentDeidentifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2deidentifycontentrequest = googleprivacydlpv2deidentifycontentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsContentDeidentifyCall) Fields(s ...googleapi.Field) *ProjectsContentDeidentifyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsContentDeidentifyCall) Context(ctx context.Context) *ProjectsContentDeidentifyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsContentDeidentifyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsContentDeidentifyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2deidentifycontentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/content:deidentify") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.content.deidentify" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyContentResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2DeidentifyContentResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsContentDeidentifyCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyContentResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyContentResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "De-identifies potentially sensitive info from a ContentItem.\nThis method has limits on input size and output size.\n[How-to guide](/dlp/docs/deidentify-sensitive-data)", + // "flatPath": "v2/projects/{projectsId}/content:deidentify", + // "httpMethod": "POST", + // "id": "dlp.projects.content.deidentify", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + 
// "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/content:deidentify", + // "request": { + // "$ref": "GooglePrivacyDlpV2DeidentifyContentRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyContentResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.content.inspect": + +type ProjectsContentInspectCall struct { + s *Service + parent string + googleprivacydlpv2inspectcontentrequest *GooglePrivacyDlpV2InspectContentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Inspect: Finds potentially sensitive info in content. +// This method has limits on input size, processing time, and output +// size. +// [How-to guide for text](/dlp/docs/inspecting-text), [How-to guide +// for +// images](/dlp/docs/inspecting-images) +func (r *ProjectsContentService) Inspect(parent string, googleprivacydlpv2inspectcontentrequest *GooglePrivacyDlpV2InspectContentRequest) *ProjectsContentInspectCall { + c := &ProjectsContentInspectCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2inspectcontentrequest = googleprivacydlpv2inspectcontentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsContentInspectCall) Fields(s ...googleapi.Field) *ProjectsContentInspectCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsContentInspectCall) Context(ctx context.Context) *ProjectsContentInspectCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsContentInspectCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsContentInspectCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2inspectcontentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/content:inspect") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.content.inspect" call. +// Exactly one of *GooglePrivacyDlpV2InspectContentResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2InspectContentResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsContentInspectCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectContentResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectContentResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Finds potentially sensitive info in content.\nThis method has limits on input size, processing time, and output size.\n[How-to guide for text](/dlp/docs/inspecting-text), [How-to guide for\nimages](/dlp/docs/inspecting-images)", + // "flatPath": "v2/projects/{projectsId}/content:inspect", + // "httpMethod": "POST", + // "id": "dlp.projects.content.inspect", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/content:inspect", + // "request": { + // "$ref": "GooglePrivacyDlpV2InspectContentRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectContentResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.content.reidentify": + +type ProjectsContentReidentifyCall struct { + s *Service + parent string + googleprivacydlpv2reidentifycontentrequest *GooglePrivacyDlpV2ReidentifyContentRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Reidentify: Re-identifies content that has been 
de-identified. +func (r *ProjectsContentService) Reidentify(parent string, googleprivacydlpv2reidentifycontentrequest *GooglePrivacyDlpV2ReidentifyContentRequest) *ProjectsContentReidentifyCall { + c := &ProjectsContentReidentifyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2reidentifycontentrequest = googleprivacydlpv2reidentifycontentrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsContentReidentifyCall) Fields(s ...googleapi.Field) *ProjectsContentReidentifyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsContentReidentifyCall) Context(ctx context.Context) *ProjectsContentReidentifyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsContentReidentifyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsContentReidentifyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2reidentifycontentrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/content:reidentify") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.content.reidentify" call. +// Exactly one of *GooglePrivacyDlpV2ReidentifyContentResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2ReidentifyContentResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsContentReidentifyCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ReidentifyContentResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ReidentifyContentResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Re-identifies content that has been de-identified.", + // "flatPath": "v2/projects/{projectsId}/content:reidentify", + // "httpMethod": "POST", + // "id": "dlp.projects.content.reidentify", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // 
"required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/content:reidentify", + // "request": { + // "$ref": "GooglePrivacyDlpV2ReidentifyContentRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2ReidentifyContentResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.deidentifyTemplates.create": + +type ProjectsDeidentifyTemplatesCreateCall struct { + s *Service + parent string + googleprivacydlpv2createdeidentifytemplaterequest *GooglePrivacyDlpV2CreateDeidentifyTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a de-identify template for re-using frequently used +// configuration +// for Deidentifying content, images, and storage. +func (r *ProjectsDeidentifyTemplatesService) Create(parent string, googleprivacydlpv2createdeidentifytemplaterequest *GooglePrivacyDlpV2CreateDeidentifyTemplateRequest) *ProjectsDeidentifyTemplatesCreateCall { + c := &ProjectsDeidentifyTemplatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2createdeidentifytemplaterequest = googleprivacydlpv2createdeidentifytemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDeidentifyTemplatesCreateCall) Fields(s ...googleapi.Field) *ProjectsDeidentifyTemplatesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsDeidentifyTemplatesCreateCall) Context(ctx context.Context) *ProjectsDeidentifyTemplatesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDeidentifyTemplatesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDeidentifyTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2createdeidentifytemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/deidentifyTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.deidentifyTemplates.create" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2DeidentifyTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsDeidentifyTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a de-identify template for re-using frequently used configuration\nfor Deidentifying content, images, and storage.", + // "flatPath": "v2/projects/{projectsId}/deidentifyTemplates", + // "httpMethod": "POST", + // "id": "dlp.projects.deidentifyTemplates.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/deidentifyTemplates", + // "request": { + // "$ref": "GooglePrivacyDlpV2CreateDeidentifyTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.deidentifyTemplates.delete": + +type ProjectsDeidentifyTemplatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a de-identify template. 
+func (r *ProjectsDeidentifyTemplatesService) Delete(name string) *ProjectsDeidentifyTemplatesDeleteCall { + c := &ProjectsDeidentifyTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDeidentifyTemplatesDeleteCall) Fields(s ...googleapi.Field) *ProjectsDeidentifyTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDeidentifyTemplatesDeleteCall) Context(ctx context.Context) *ProjectsDeidentifyTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDeidentifyTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDeidentifyTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.deidentifyTemplates.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsDeidentifyTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a de-identify template.", + // "flatPath": "v2/projects/{projectsId}/deidentifyTemplates/{deidentifyTemplatesId}", + // "httpMethod": "DELETE", + // "id": "dlp.projects.deidentifyTemplates.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and deidentify template to be deleted,\nfor example `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + // "location": "path", + // "pattern": "^projects/[^/]+/deidentifyTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.deidentifyTemplates.get": + 
+type ProjectsDeidentifyTemplatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a de-identify template. +func (r *ProjectsDeidentifyTemplatesService) Get(name string) *ProjectsDeidentifyTemplatesGetCall { + c := &ProjectsDeidentifyTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDeidentifyTemplatesGetCall) Fields(s ...googleapi.Field) *ProjectsDeidentifyTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsDeidentifyTemplatesGetCall) IfNoneMatch(entityTag string) *ProjectsDeidentifyTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDeidentifyTemplatesGetCall) Context(ctx context.Context) *ProjectsDeidentifyTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsDeidentifyTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDeidentifyTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.deidentifyTemplates.get" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2DeidentifyTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsDeidentifyTemplatesGetCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a de-identify template.", + // "flatPath": "v2/projects/{projectsId}/deidentifyTemplates/{deidentifyTemplatesId}", + // "httpMethod": "GET", + // "id": "dlp.projects.deidentifyTemplates.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and deidentify template to be read, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + // "location": "path", + // "pattern": "^projects/[^/]+/deidentifyTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.deidentifyTemplates.list": + +type ProjectsDeidentifyTemplatesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists de-identify templates. 
+func (r *ProjectsDeidentifyTemplatesService) List(parent string) *ProjectsDeidentifyTemplatesListCall { + c := &ProjectsDeidentifyTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Optional size of the +// page, can be limited by server. If zero server returns +// a page of max size 100. +func (c *ProjectsDeidentifyTemplatesListCall) PageSize(pageSize int64) *ProjectsDeidentifyTemplatesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional page +// token to continue retrieval. Comes from previous call +// to `ListDeidentifyTemplates`. +func (c *ProjectsDeidentifyTemplatesListCall) PageToken(pageToken string) *ProjectsDeidentifyTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDeidentifyTemplatesListCall) Fields(s ...googleapi.Field) *ProjectsDeidentifyTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsDeidentifyTemplatesListCall) IfNoneMatch(entityTag string) *ProjectsDeidentifyTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsDeidentifyTemplatesListCall) Context(ctx context.Context) *ProjectsDeidentifyTemplatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDeidentifyTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDeidentifyTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/deidentifyTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.deidentifyTemplates.list" call. +// Exactly one of *GooglePrivacyDlpV2ListDeidentifyTemplatesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2ListDeidentifyTemplatesResponse.ServerResponse.Head +// er or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsDeidentifyTemplatesListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListDeidentifyTemplatesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListDeidentifyTemplatesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists de-identify templates.", + // "flatPath": "v2/projects/{projectsId}/deidentifyTemplates", + // "httpMethod": "GET", + // "id": "dlp.projects.deidentifyTemplates.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional page token to continue retrieval. Comes from previous call\nto `ListDeidentifyTemplates`.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/deidentifyTemplates", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListDeidentifyTemplatesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *ProjectsDeidentifyTemplatesListCall) Pages(ctx context.Context, f func(*GooglePrivacyDlpV2ListDeidentifyTemplatesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dlp.projects.deidentifyTemplates.patch": + +type ProjectsDeidentifyTemplatesPatchCall struct { + s *Service + name string + googleprivacydlpv2updatedeidentifytemplaterequest *GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the de-identify template. +func (r *ProjectsDeidentifyTemplatesService) Patch(name string, googleprivacydlpv2updatedeidentifytemplaterequest *GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest) *ProjectsDeidentifyTemplatesPatchCall { + c := &ProjectsDeidentifyTemplatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.googleprivacydlpv2updatedeidentifytemplaterequest = googleprivacydlpv2updatedeidentifytemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDeidentifyTemplatesPatchCall) Fields(s ...googleapi.Field) *ProjectsDeidentifyTemplatesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsDeidentifyTemplatesPatchCall) Context(ctx context.Context) *ProjectsDeidentifyTemplatesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDeidentifyTemplatesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDeidentifyTemplatesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2updatedeidentifytemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.deidentifyTemplates.patch" call. +// Exactly one of *GooglePrivacyDlpV2DeidentifyTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2DeidentifyTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsDeidentifyTemplatesPatchCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DeidentifyTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DeidentifyTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the de-identify template.", + // "flatPath": "v2/projects/{projectsId}/deidentifyTemplates/{deidentifyTemplatesId}", + // "httpMethod": "PATCH", + // "id": "dlp.projects.deidentifyTemplates.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of organization and deidentify template to be updated, for\nexample `organizations/433245324/deidentifyTemplates/432452342` or\nprojects/project-id/deidentifyTemplates/432452342.", + // "location": "path", + // "pattern": "^projects/[^/]+/deidentifyTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GooglePrivacyDlpV2UpdateDeidentifyTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2DeidentifyTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.dlpJobs.cancel": + +type ProjectsDlpJobsCancelCall struct { + s *Service + name string + googleprivacydlpv2canceldlpjobrequest *GooglePrivacyDlpV2CancelDlpJobRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running DlpJob. 
+// The server +// makes a best effort to cancel the DlpJob, but success is +// not +// guaranteed. +func (r *ProjectsDlpJobsService) Cancel(name string, googleprivacydlpv2canceldlpjobrequest *GooglePrivacyDlpV2CancelDlpJobRequest) *ProjectsDlpJobsCancelCall { + c := &ProjectsDlpJobsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.googleprivacydlpv2canceldlpjobrequest = googleprivacydlpv2canceldlpjobrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDlpJobsCancelCall) Fields(s ...googleapi.Field) *ProjectsDlpJobsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDlpJobsCancelCall) Context(ctx context.Context) *ProjectsDlpJobsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDlpJobsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDlpJobsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2canceldlpjobrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:cancel") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.dlpJobs.cancel" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsDlpJobsCancelCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts asynchronous cancellation on a long-running DlpJob. 
The server\nmakes a best effort to cancel the DlpJob, but success is not\nguaranteed.", + // "flatPath": "v2/projects/{projectsId}/dlpJobs/{dlpJobsId}:cancel", + // "httpMethod": "POST", + // "id": "dlp.projects.dlpJobs.cancel", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the DlpJob resource to be cancelled.", + // "location": "path", + // "pattern": "^projects/[^/]+/dlpJobs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}:cancel", + // "request": { + // "$ref": "GooglePrivacyDlpV2CancelDlpJobRequest" + // }, + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.dlpJobs.create": + +type ProjectsDlpJobsCreateCall struct { + s *Service + parent string + googleprivacydlpv2createdlpjobrequest *GooglePrivacyDlpV2CreateDlpJobRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new job to inspect storage or calculate risk +// metrics [How-to +// guide](/dlp/docs/compute-risk-analysis). +func (r *ProjectsDlpJobsService) Create(parent string, googleprivacydlpv2createdlpjobrequest *GooglePrivacyDlpV2CreateDlpJobRequest) *ProjectsDlpJobsCreateCall { + c := &ProjectsDlpJobsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2createdlpjobrequest = googleprivacydlpv2createdlpjobrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDlpJobsCreateCall) Fields(s ...googleapi.Field) *ProjectsDlpJobsCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDlpJobsCreateCall) Context(ctx context.Context) *ProjectsDlpJobsCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDlpJobsCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDlpJobsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2createdlpjobrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/dlpJobs") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.dlpJobs.create" call. +// Exactly one of *GooglePrivacyDlpV2DlpJob or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GooglePrivacyDlpV2DlpJob.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsDlpJobsCreateCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DlpJob, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DlpJob{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new job to inspect storage or calculate risk metrics [How-to\nguide](/dlp/docs/compute-risk-analysis).", + // "flatPath": "v2/projects/{projectsId}/dlpJobs", + // "httpMethod": "POST", + // "id": "dlp.projects.dlpJobs.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/dlpJobs", + // "request": { + // "$ref": "GooglePrivacyDlpV2CreateDlpJobRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2DlpJob" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.dlpJobs.delete": + +type ProjectsDlpJobsDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a long-running DlpJob. This method indicates that the +// client is +// no longer interested in the DlpJob result. The job will be cancelled +// if +// possible. 
+func (r *ProjectsDlpJobsService) Delete(name string) *ProjectsDlpJobsDeleteCall { + c := &ProjectsDlpJobsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDlpJobsDeleteCall) Fields(s ...googleapi.Field) *ProjectsDlpJobsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDlpJobsDeleteCall) Context(ctx context.Context) *ProjectsDlpJobsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDlpJobsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDlpJobsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.dlpJobs.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsDlpJobsDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a long-running DlpJob. This method indicates that the client is\nno longer interested in the DlpJob result. The job will be cancelled if\npossible.", + // "flatPath": "v2/projects/{projectsId}/dlpJobs/{dlpJobsId}", + // "httpMethod": "DELETE", + // "id": "dlp.projects.dlpJobs.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the DlpJob resource to be deleted.", + // "location": "path", + // "pattern": "^projects/[^/]+/dlpJobs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.dlpJobs.get": + +type ProjectsDlpJobsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running DlpJob. 
+func (r *ProjectsDlpJobsService) Get(name string) *ProjectsDlpJobsGetCall { + c := &ProjectsDlpJobsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDlpJobsGetCall) Fields(s ...googleapi.Field) *ProjectsDlpJobsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsDlpJobsGetCall) IfNoneMatch(entityTag string) *ProjectsDlpJobsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDlpJobsGetCall) Context(ctx context.Context) *ProjectsDlpJobsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsDlpJobsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDlpJobsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.dlpJobs.get" call. +// Exactly one of *GooglePrivacyDlpV2DlpJob or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GooglePrivacyDlpV2DlpJob.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsDlpJobsGetCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2DlpJob, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2DlpJob{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running DlpJob.", + // "flatPath": "v2/projects/{projectsId}/dlpJobs/{dlpJobsId}", + // "httpMethod": "GET", + // "id": "dlp.projects.dlpJobs.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the DlpJob resource.", + // "location": "path", + // "pattern": "^projects/[^/]+/dlpJobs/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { 
+ // "$ref": "GooglePrivacyDlpV2DlpJob" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.dlpJobs.list": + +type ProjectsDlpJobsListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists DlpJobs that match the specified filter in the request. +func (r *ProjectsDlpJobsService) List(parent string) *ProjectsDlpJobsListCall { + c := &ProjectsDlpJobsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": Allows +// filtering. +// +// Supported syntax: +// +// * Filter expressions are made up of one or more restrictions. +// * Restrictions can be combined by `AND` or `OR` logical operators. +// A +// sequence of restrictions implicitly uses `AND`. +// * A restriction has the form of ` `. +// * Supported fields/values for inspect jobs: +// - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED +// - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY +// - `trigger_name` - The resource name of the trigger that created +// job. +// * Supported fields for risk analysis jobs: +// - `state` - RUNNING|CANCELED|FINISHED|FAILED +// * The operator must be `=` or `!=`. +// +// Examples: +// +// * inspected_storage = cloud_storage AND state = done +// * inspected_storage = cloud_storage OR inspected_storage = bigquery +// * inspected_storage = cloud_storage AND (state = done OR state = +// canceled) +// +// The length of this field should be no more than 500 characters. +func (c *ProjectsDlpJobsListCall) Filter(filter string) *ProjectsDlpJobsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": The standard list +// page size. 
+func (c *ProjectsDlpJobsListCall) PageSize(pageSize int64) *ProjectsDlpJobsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The standard list +// page token. +func (c *ProjectsDlpJobsListCall) PageToken(pageToken string) *ProjectsDlpJobsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Type sets the optional parameter "type": The type of job. Defaults to +// `DlpJobType.INSPECT` +// +// Possible values: +// "DLP_JOB_TYPE_UNSPECIFIED" +// "INSPECT_JOB" +// "RISK_ANALYSIS_JOB" +func (c *ProjectsDlpJobsListCall) Type(type_ string) *ProjectsDlpJobsListCall { + c.urlParams_.Set("type", type_) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsDlpJobsListCall) Fields(s ...googleapi.Field) *ProjectsDlpJobsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsDlpJobsListCall) IfNoneMatch(entityTag string) *ProjectsDlpJobsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsDlpJobsListCall) Context(ctx context.Context) *ProjectsDlpJobsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsDlpJobsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsDlpJobsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/dlpJobs") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.dlpJobs.list" call. +// Exactly one of *GooglePrivacyDlpV2ListDlpJobsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GooglePrivacyDlpV2ListDlpJobsResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsDlpJobsListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListDlpJobsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListDlpJobsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists DlpJobs that match the specified filter in the request.", + // "flatPath": "v2/projects/{projectsId}/dlpJobs", + // "httpMethod": "GET", + // "id": "dlp.projects.dlpJobs.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "Optional. Allows filtering.\n\nSupported syntax:\n\n* Filter expressions are made up of one or more restrictions.\n* Restrictions can be combined by `AND` or `OR` logical operators. 
A\nsequence of restrictions implicitly uses `AND`.\n* A restriction has the form of `\u003cfield\u003e \u003coperator\u003e \u003cvalue\u003e`.\n* Supported fields/values for inspect jobs:\n - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED\n - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY\n - `trigger_name` - The resource name of the trigger that created job.\n* Supported fields for risk analysis jobs:\n - `state` - RUNNING|CANCELED|FINISHED|FAILED\n* The operator must be `=` or `!=`.\n\nExamples:\n\n* inspected_storage = cloud_storage AND state = done\n* inspected_storage = cloud_storage OR inspected_storage = bigquery\n* inspected_storage = cloud_storage AND (state = done OR state = canceled)\n\nThe length of this field should be no more than 500 characters.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The standard list page size.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The standard list page token.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // }, + // "type": { + // "description": "The type of job. Defaults to `DlpJobType.INSPECT`", + // "enum": [ + // "DLP_JOB_TYPE_UNSPECIFIED", + // "INSPECT_JOB", + // "RISK_ANALYSIS_JOB" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/dlpJobs", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListDlpJobsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsDlpJobsListCall) Pages(ctx context.Context, f func(*GooglePrivacyDlpV2ListDlpJobsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dlp.projects.image.redact": + +type ProjectsImageRedactCall struct { + s *Service + parent string + googleprivacydlpv2redactimagerequest *GooglePrivacyDlpV2RedactImageRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Redact: Redacts potentially sensitive info from an image. +// This method has limits on input size, processing time, and output +// size. +// [How-to guide](/dlp/docs/redacting-sensitive-data-images) +func (r *ProjectsImageService) Redact(parent string, googleprivacydlpv2redactimagerequest *GooglePrivacyDlpV2RedactImageRequest) *ProjectsImageRedactCall { + c := &ProjectsImageRedactCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2redactimagerequest = googleprivacydlpv2redactimagerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsImageRedactCall) Fields(s ...googleapi.Field) *ProjectsImageRedactCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsImageRedactCall) Context(ctx context.Context) *ProjectsImageRedactCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsImageRedactCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsImageRedactCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2redactimagerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/image:redact") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.image.redact" call. +// Exactly one of *GooglePrivacyDlpV2RedactImageResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GooglePrivacyDlpV2RedactImageResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsImageRedactCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2RedactImageResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2RedactImageResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Redacts potentially sensitive info from an image.\nThis method has limits on input size, processing time, and output size.\n[How-to guide](/dlp/docs/redacting-sensitive-data-images)", + // "flatPath": "v2/projects/{projectsId}/image:redact", + // "httpMethod": "POST", + // "id": "dlp.projects.image.redact", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/image:redact", + // "request": { + // "$ref": "GooglePrivacyDlpV2RedactImageRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2RedactImageResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.inspectTemplates.create": + +type ProjectsInspectTemplatesCreateCall struct { + s *Service + parent string + googleprivacydlpv2createinspecttemplaterequest *GooglePrivacyDlpV2CreateInspectTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates an inspect template for re-using frequently used +// configuration +// for inspecting 
content, images, and storage. +func (r *ProjectsInspectTemplatesService) Create(parent string, googleprivacydlpv2createinspecttemplaterequest *GooglePrivacyDlpV2CreateInspectTemplateRequest) *ProjectsInspectTemplatesCreateCall { + c := &ProjectsInspectTemplatesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2createinspecttemplaterequest = googleprivacydlpv2createinspecttemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInspectTemplatesCreateCall) Fields(s ...googleapi.Field) *ProjectsInspectTemplatesCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInspectTemplatesCreateCall) Context(ctx context.Context) *ProjectsInspectTemplatesCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInspectTemplatesCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInspectTemplatesCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2createinspecttemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/inspectTemplates") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.inspectTemplates.create" call. +// Exactly one of *GooglePrivacyDlpV2InspectTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2InspectTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsInspectTemplatesCreateCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an inspect template for re-using frequently used configuration\nfor inspecting content, images, and storage.", + // "flatPath": "v2/projects/{projectsId}/inspectTemplates", + // "httpMethod": "POST", + // "id": "dlp.projects.inspectTemplates.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id 
or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/inspectTemplates", + // "request": { + // "$ref": "GooglePrivacyDlpV2CreateInspectTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.inspectTemplates.delete": + +type ProjectsInspectTemplatesDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an inspect template. +func (r *ProjectsInspectTemplatesService) Delete(name string) *ProjectsInspectTemplatesDeleteCall { + c := &ProjectsInspectTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInspectTemplatesDeleteCall) Fields(s ...googleapi.Field) *ProjectsInspectTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInspectTemplatesDeleteCall) Context(ctx context.Context) *ProjectsInspectTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInspectTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInspectTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.inspectTemplates.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsInspectTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes an inspect template.", + // "flatPath": "v2/projects/{projectsId}/inspectTemplates/{inspectTemplatesId}", + // "httpMethod": "DELETE", + // "id": "dlp.projects.inspectTemplates.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and inspectTemplate to be deleted, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + // "location": "path", + // "pattern": "^projects/[^/]+/inspectTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.inspectTemplates.get": + +type ProjectsInspectTemplatesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets an inspect template. 
+func (r *ProjectsInspectTemplatesService) Get(name string) *ProjectsInspectTemplatesGetCall { + c := &ProjectsInspectTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInspectTemplatesGetCall) Fields(s ...googleapi.Field) *ProjectsInspectTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInspectTemplatesGetCall) IfNoneMatch(entityTag string) *ProjectsInspectTemplatesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsInspectTemplatesGetCall) Context(ctx context.Context) *ProjectsInspectTemplatesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsInspectTemplatesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInspectTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.inspectTemplates.get" call. +// Exactly one of *GooglePrivacyDlpV2InspectTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2InspectTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsInspectTemplatesGetCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets an inspect template.", + // "flatPath": "v2/projects/{projectsId}/inspectTemplates/{inspectTemplatesId}", + // "httpMethod": "GET", + // "id": "dlp.projects.inspectTemplates.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the organization and inspectTemplate to be read, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + // "location": "path", + // "pattern": "^projects/[^/]+/inspectTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.inspectTemplates.list": + +type ProjectsInspectTemplatesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists inspect templates. 
+func (r *ProjectsInspectTemplatesService) List(parent string) *ProjectsInspectTemplatesListCall { + c := &ProjectsInspectTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// PageSize sets the optional parameter "pageSize": Optional size of the +// page, can be limited by server. If zero server returns +// a page of max size 100. +func (c *ProjectsInspectTemplatesListCall) PageSize(pageSize int64) *ProjectsInspectTemplatesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional page +// token to continue retrieval. Comes from previous call +// to `ListInspectTemplates`. +func (c *ProjectsInspectTemplatesListCall) PageToken(pageToken string) *ProjectsInspectTemplatesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInspectTemplatesListCall) Fields(s ...googleapi.Field) *ProjectsInspectTemplatesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsInspectTemplatesListCall) IfNoneMatch(entityTag string) *ProjectsInspectTemplatesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInspectTemplatesListCall) Context(ctx context.Context) *ProjectsInspectTemplatesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInspectTemplatesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInspectTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/inspectTemplates") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.inspectTemplates.list" call. +// Exactly one of *GooglePrivacyDlpV2ListInspectTemplatesResponse or +// error will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2ListInspectTemplatesResponse.ServerResponse.Header +// or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsInspectTemplatesListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListInspectTemplatesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListInspectTemplatesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists inspect templates.", + // "flatPath": "v2/projects/{projectsId}/inspectTemplates", + // "httpMethod": "GET", + // "id": "dlp.projects.inspectTemplates.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "pageSize": { + // "description": "Optional size of the page, can be limited by server. If zero server returns\na page of max size 100.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional page token to continue retrieval. Comes from previous call\nto `ListInspectTemplates`.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id or\norganizations/my-org-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/inspectTemplates", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListInspectTemplatesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. 
+// The provided context supersedes any context provided to the Context method. +func (c *ProjectsInspectTemplatesListCall) Pages(ctx context.Context, f func(*GooglePrivacyDlpV2ListInspectTemplatesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dlp.projects.inspectTemplates.patch": + +type ProjectsInspectTemplatesPatchCall struct { + s *Service + name string + googleprivacydlpv2updateinspecttemplaterequest *GooglePrivacyDlpV2UpdateInspectTemplateRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates the inspect template. +func (r *ProjectsInspectTemplatesService) Patch(name string, googleprivacydlpv2updateinspecttemplaterequest *GooglePrivacyDlpV2UpdateInspectTemplateRequest) *ProjectsInspectTemplatesPatchCall { + c := &ProjectsInspectTemplatesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.googleprivacydlpv2updateinspecttemplaterequest = googleprivacydlpv2updateinspecttemplaterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsInspectTemplatesPatchCall) Fields(s ...googleapi.Field) *ProjectsInspectTemplatesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ProjectsInspectTemplatesPatchCall) Context(ctx context.Context) *ProjectsInspectTemplatesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsInspectTemplatesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsInspectTemplatesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2updateinspecttemplaterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.inspectTemplates.patch" call. +// Exactly one of *GooglePrivacyDlpV2InspectTemplate or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2InspectTemplate.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsInspectTemplatesPatchCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2InspectTemplate, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2InspectTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the inspect template.", + // "flatPath": "v2/projects/{projectsId}/inspectTemplates/{inspectTemplatesId}", + // "httpMethod": "PATCH", + // "id": "dlp.projects.inspectTemplates.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of organization and inspectTemplate to be updated, for\nexample `organizations/433245324/inspectTemplates/432452342` or\nprojects/project-id/inspectTemplates/432452342.", + // "location": "path", + // "pattern": "^projects/[^/]+/inspectTemplates/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GooglePrivacyDlpV2UpdateInspectTemplateRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2InspectTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.jobTriggers.create": + +type ProjectsJobTriggersCreateCall struct { + s *Service + parent string + googleprivacydlpv2createjobtriggerrequest *GooglePrivacyDlpV2CreateJobTriggerRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a job trigger to run DLP actions such as scanning +// storage for +// sensitive information on 
a set schedule. +func (r *ProjectsJobTriggersService) Create(parent string, googleprivacydlpv2createjobtriggerrequest *GooglePrivacyDlpV2CreateJobTriggerRequest) *ProjectsJobTriggersCreateCall { + c := &ProjectsJobTriggersCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + c.googleprivacydlpv2createjobtriggerrequest = googleprivacydlpv2createjobtriggerrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsJobTriggersCreateCall) Fields(s ...googleapi.Field) *ProjectsJobTriggersCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsJobTriggersCreateCall) Context(ctx context.Context) *ProjectsJobTriggersCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsJobTriggersCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsJobTriggersCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2createjobtriggerrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/jobTriggers") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.jobTriggers.create" call. +// Exactly one of *GooglePrivacyDlpV2JobTrigger or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2JobTrigger.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsJobTriggersCreateCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2JobTrigger, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2JobTrigger{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a job trigger to run DLP actions such as scanning storage for\nsensitive information on a set schedule.", + // "flatPath": "v2/projects/{projectsId}/jobTriggers", + // "httpMethod": "POST", + // "id": "dlp.projects.jobTriggers.create", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": 
"^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/jobTriggers", + // "request": { + // "$ref": "GooglePrivacyDlpV2CreateJobTriggerRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2JobTrigger" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.jobTriggers.delete": + +type ProjectsJobTriggersDeleteCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes a job trigger. +func (r *ProjectsJobTriggersService) Delete(name string) *ProjectsJobTriggersDeleteCall { + c := &ProjectsJobTriggersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsJobTriggersDeleteCall) Fields(s ...googleapi.Field) *ProjectsJobTriggersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsJobTriggersDeleteCall) Context(ctx context.Context) *ProjectsJobTriggersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsJobTriggersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsJobTriggersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.jobTriggers.delete" call. +// Exactly one of *GoogleProtobufEmpty or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *GoogleProtobufEmpty.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsJobTriggersDeleteCall) Do(opts ...googleapi.CallOption) (*GoogleProtobufEmpty, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GoogleProtobufEmpty{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a job trigger.", + // "flatPath": "v2/projects/{projectsId}/jobTriggers/{jobTriggersId}", + // "httpMethod": "DELETE", + // "id": "dlp.projects.jobTriggers.delete", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the project and the triggeredJob, for example\n`projects/dlp-test-project/jobTriggers/53234423`.", + // "location": "path", + // "pattern": "^projects/[^/]+/jobTriggers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GoogleProtobufEmpty" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.jobTriggers.get": + +type ProjectsJobTriggersGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets a job trigger. +func (r *ProjectsJobTriggersService) Get(name string) *ProjectsJobTriggersGetCall { + c := &ProjectsJobTriggersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsJobTriggersGetCall) Fields(s ...googleapi.Field) *ProjectsJobTriggersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsJobTriggersGetCall) IfNoneMatch(entityTag string) *ProjectsJobTriggersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsJobTriggersGetCall) Context(ctx context.Context) *ProjectsJobTriggersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsJobTriggersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsJobTriggersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.jobTriggers.get" call. +// Exactly one of *GooglePrivacyDlpV2JobTrigger or error will be +// non-nil. 
Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2JobTrigger.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsJobTriggersGetCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2JobTrigger, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2JobTrigger{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a job trigger.", + // "flatPath": "v2/projects/{projectsId}/jobTriggers/{jobTriggersId}", + // "httpMethod": "GET", + // "id": "dlp.projects.jobTriggers.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the project and the triggeredJob, for example\n`projects/dlp-test-project/jobTriggers/53234423`.", + // "location": "path", + // "pattern": "^projects/[^/]+/jobTriggers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "response": { + // "$ref": "GooglePrivacyDlpV2JobTrigger" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "dlp.projects.jobTriggers.list": + +type ProjectsJobTriggersListCall struct { + s *Service + parent string + 
urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists job triggers. +func (r *ProjectsJobTriggersService) List(parent string) *ProjectsJobTriggersListCall { + c := &ProjectsJobTriggersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// OrderBy sets the optional parameter "orderBy": Optional comma +// separated list of triggeredJob fields to order by, +// followed by 'asc/desc' postfix, i.e. +// "create_time asc,name desc,schedule_mode asc". This list +// is +// case-insensitive. +// +// Example: "name asc,schedule_mode desc, status desc" +// +// Supported filters keys and values are: +// +// - `create_time`: corresponds to time the triggeredJob was created. +// - `update_time`: corresponds to time the triggeredJob was last +// updated. +// - `name`: corresponds to JobTrigger's display name. +// - `status`: corresponds to the triggeredJob status. +func (c *ProjectsJobTriggersListCall) OrderBy(orderBy string) *ProjectsJobTriggersListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": Optional size of the +// page, can be limited by a server. +func (c *ProjectsJobTriggersListCall) PageSize(pageSize int64) *ProjectsJobTriggersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": Optional page +// token to continue retrieval. Comes from previous call +// to ListJobTriggers. `order_by` and `filter` should not change +// for +// subsequent calls, but can be omitted if token is specified. +func (c *ProjectsJobTriggersListCall) PageToken(pageToken string) *ProjectsJobTriggersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ProjectsJobTriggersListCall) Fields(s ...googleapi.Field) *ProjectsJobTriggersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsJobTriggersListCall) IfNoneMatch(entityTag string) *ProjectsJobTriggersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsJobTriggersListCall) Context(ctx context.Context) *ProjectsJobTriggersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsJobTriggersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsJobTriggersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/jobTriggers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.jobTriggers.list" call. 
+// Exactly one of *GooglePrivacyDlpV2ListJobTriggersResponse or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *GooglePrivacyDlpV2ListJobTriggersResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ProjectsJobTriggersListCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2ListJobTriggersResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2ListJobTriggersResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists job triggers.", + // "flatPath": "v2/projects/{projectsId}/jobTriggers", + // "httpMethod": "GET", + // "id": "dlp.projects.jobTriggers.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "orderBy": { + // "description": "Optional comma separated list of triggeredJob fields to order by,\nfollowed by 'asc/desc' postfix, i.e.\n`\"create_time asc,name desc,schedule_mode asc\"`. 
This list is\ncase-insensitive.\n\nExample: `\"name asc,schedule_mode desc, status desc\"`\n\nSupported filters keys and values are:\n\n- `create_time`: corresponds to time the triggeredJob was created.\n- `update_time`: corresponds to time the triggeredJob was last updated.\n- `name`: corresponds to JobTrigger's display name.\n- `status`: corresponds to the triggeredJob status.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Optional size of the page, can be limited by a server.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional page token to continue retrieval. Comes from previous call\nto ListJobTriggers. `order_by` and `filter` should not change for\nsubsequent calls, but can be omitted if token is specified.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name, for example projects/my-project-id.", + // "location": "path", + // "pattern": "^projects/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+parent}/jobTriggers", + // "response": { + // "$ref": "GooglePrivacyDlpV2ListJobTriggersResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *ProjectsJobTriggersListCall) Pages(ctx context.Context, f func(*GooglePrivacyDlpV2ListJobTriggersResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dlp.projects.jobTriggers.patch": + +type ProjectsJobTriggersPatchCall struct { + s *Service + name string + googleprivacydlpv2updatejobtriggerrequest *GooglePrivacyDlpV2UpdateJobTriggerRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Updates a job trigger. +func (r *ProjectsJobTriggersService) Patch(name string, googleprivacydlpv2updatejobtriggerrequest *GooglePrivacyDlpV2UpdateJobTriggerRequest) *ProjectsJobTriggersPatchCall { + c := &ProjectsJobTriggersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + c.googleprivacydlpv2updatejobtriggerrequest = googleprivacydlpv2updatejobtriggerrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsJobTriggersPatchCall) Fields(s ...googleapi.Field) *ProjectsJobTriggersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsJobTriggersPatchCall) Context(ctx context.Context) *ProjectsJobTriggersPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ProjectsJobTriggersPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsJobTriggersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.googleprivacydlpv2updatejobtriggerrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dlp.projects.jobTriggers.patch" call. +// Exactly one of *GooglePrivacyDlpV2JobTrigger or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *GooglePrivacyDlpV2JobTrigger.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsJobTriggersPatchCall) Do(opts ...googleapi.CallOption) (*GooglePrivacyDlpV2JobTrigger, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &GooglePrivacyDlpV2JobTrigger{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a job trigger.", + // "flatPath": "v2/projects/{projectsId}/jobTriggers/{jobTriggersId}", + // "httpMethod": "PATCH", + // "id": "dlp.projects.jobTriggers.patch", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "Resource name of the project and the triggeredJob, for example\n`projects/dlp-test-project/jobTriggers/53234423`.", + // "location": "path", + // "pattern": "^projects/[^/]+/jobTriggers/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v2/{+name}", + // "request": { + // "$ref": "GooglePrivacyDlpV2UpdateJobTriggerRequest" + // }, + // "response": { + // "$ref": "GooglePrivacyDlpV2JobTrigger" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/dlp/v2beta1/dlp-api.json b/vendor/google.golang.org/api/dlp/v2beta1/dlp-api.json index ea9595c19..97411adf0 100644 --- a/vendor/google.golang.org/api/dlp/v2beta1/dlp-api.json +++ b/vendor/google.golang.org/api/dlp/v2beta1/dlp-api.json @@ -12,7 +12,7 @@ "baseUrl": "https://dlp.googleapis.com/", "batchPath": "batch", "canonicalName": "DLP", - "description": "The Google Data Loss Prevention API provides methods for detection of privacy-sensitive fragments in text, images, and 
Google Cloud Platform storage repositories.", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/dlp/docs/", "fullyEncodeReservedExpansion": true, @@ -578,7 +578,7 @@ } } }, - "revision": "20180220", + "revision": "20180320", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GoogleLongrunningCancelOperationRequest": { @@ -2827,7 +2827,7 @@ } }, "servicePath": "", - "title": "DLP API", + "title": "Cloud Data Loss Prevention (DLP) API", "version": "v2beta1", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/dlp/v2beta1/dlp-gen.go b/vendor/google.golang.org/api/dlp/v2beta1/dlp-gen.go index f9b0f1db1..24b05265c 100644 --- a/vendor/google.golang.org/api/dlp/v2beta1/dlp-gen.go +++ b/vendor/google.golang.org/api/dlp/v2beta1/dlp-gen.go @@ -1,4 +1,4 @@ -// Package dlp provides access to the DLP API. +// Package dlp provides access to the Cloud Data Loss Prevention (DLP) API. 
// // See https://cloud.google.com/dlp/docs/ // diff --git a/vendor/google.golang.org/api/dlp/v2beta2/dlp-api.json b/vendor/google.golang.org/api/dlp/v2beta2/dlp-api.json index 5357e29da..c64bae936 100644 --- a/vendor/google.golang.org/api/dlp/v2beta2/dlp-api.json +++ b/vendor/google.golang.org/api/dlp/v2beta2/dlp-api.json @@ -12,7 +12,7 @@ "baseUrl": "https://dlp.googleapis.com/", "batchPath": "batch", "canonicalName": "DLP", - "description": "The Google Data Loss Prevention API provides methods for detection of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", + "description": "Provides methods for detection, risk analysis, and de-identification of privacy-sensitive fragments in text, images, and Google Cloud Platform storage repositories.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/dlp/docs/", "fullyEncodeReservedExpansion": true, @@ -1202,7 +1202,7 @@ } } }, - "revision": "20180308", + "revision": "20180320", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GooglePrivacyDlpV2beta1AuxiliaryTable": { @@ -2610,7 +2610,7 @@ "type": "object" }, "GooglePrivacyDlpV2beta2CryptoReplaceFfxFpeConfig": { - "description": "Replaces an identifier with a surrogate using FPE with the FFX\nmode of operation; however when used in the `ReidentifyContent` API method,\nit serves the opposite function by reversing the surrogate back into\nthe original identifier.\nThe identifier must be encoded as ASCII.\nFor a given crypto key and context, the same identifier will be\nreplaced with the same surrogate.\nIdentifiers must be at least two characters long.\nIn the case that the identifier is the empty string, it will be skipped.", + "description": "Replaces an identifier with a surrogate using FPE with the FFX\nmode of operation; however when used in the `ReidentifyContent` API method,\nit serves the opposite function by reversing the surrogate back into\nthe original identifier.\nThe identifier must be 
encoded as ASCII.\nFor a given crypto key and context, the same identifier will be\nreplaced with the same surrogate.\nIdentifiers must be at least two characters long.\nIn the case that the identifier is the empty string, it will be skipped.\nSee [Pseudonymization](/dlp/docs/pseudonymization) for example usage.", "id": "GooglePrivacyDlpV2beta2CryptoReplaceFfxFpeConfig", "properties": { "commonAlphabet": { @@ -3144,12 +3144,12 @@ "type": "array" }, "maxFindingsPerItem": { - "description": "Max number of findings that will be returned for each item scanned.", + "description": "Max number of findings that will be returned for each item scanned.\nWhen set within `InspectDataSourceRequest`,\nthe maximum returned is 1000 regardless if this is set higher.\nWhen set within `InspectContentRequest`, this field is ignored.", "format": "int32", "type": "integer" }, "maxFindingsPerRequest": { - "description": "Max total number of findings that will be returned per request/job.", + "description": "Max number of findings that will be returned per request/job.\nWhen set within `InspectContentRequest`, the maximum returned is 1000\nregardless if this is set higher.", "format": "int32", "type": "integer" } @@ -5052,7 +5052,7 @@ } }, "servicePath": "", - "title": "DLP API", + "title": "Cloud Data Loss Prevention (DLP) API", "version": "v2beta2", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/dlp/v2beta2/dlp-gen.go b/vendor/google.golang.org/api/dlp/v2beta2/dlp-gen.go index d4b6da856..3ff72e576 100644 --- a/vendor/google.golang.org/api/dlp/v2beta2/dlp-gen.go +++ b/vendor/google.golang.org/api/dlp/v2beta2/dlp-gen.go @@ -1,4 +1,4 @@ -// Package dlp provides access to the DLP API. +// Package dlp provides access to the Cloud Data Loss Prevention (DLP) API. 
// // See https://cloud.google.com/dlp/docs/ // @@ -3189,6 +3189,7 @@ func (s *GooglePrivacyDlpV2beta2CryptoKey) MarshalJSON() ([]byte, error) { // Identifiers must be at least two characters long. // In the case that the identifier is the empty string, it will be // skipped. +// See [Pseudonymization](/dlp/docs/pseudonymization) for example usage. type GooglePrivacyDlpV2beta2CryptoReplaceFfxFpeConfig struct { // Possible values: // "FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED" @@ -4194,10 +4195,16 @@ type GooglePrivacyDlpV2beta2FindingLimits struct { // MaxFindingsPerItem: Max number of findings that will be returned for // each item scanned. + // When set within `InspectDataSourceRequest`, + // the maximum returned is 1000 regardless if this is set higher. + // When set within `InspectContentRequest`, this field is ignored. MaxFindingsPerItem int64 `json:"maxFindingsPerItem,omitempty"` - // MaxFindingsPerRequest: Max total number of findings that will be - // returned per request/job. + // MaxFindingsPerRequest: Max number of findings that will be returned + // per request/job. + // When set within `InspectContentRequest`, the maximum returned is + // 1000 + // regardless if this is set higher. MaxFindingsPerRequest int64 `json:"maxFindingsPerRequest,omitempty"` // ForceSendFields is a list of field names (e.g. 
diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index c706db00b..6dd6af223 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -23,7 +23,7 @@ "description": "Configures and serves authoritative DNS records.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/cloud-dns", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/kG__mzxEhJD8E2g48tXilpLKrgU\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/gSzgHqX4Zwypnde2YApimTf_qmE\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -91,6 +91,11 @@ "managedZone" ], "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, "managedZone": { "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", "location": "path", @@ -132,6 +137,11 @@ "required": true, "type": "string" }, + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, "managedZone": { "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", "location": "path", @@ -219,6 +229,216 @@ } } }, + "dnsKeys": { + "methods": { + "get": { + "description": "Fetch the representation of an existing DnsKey.", + "httpMethod": "GET", + "id": "dns.dnsKeys.get", + "parameterOrder": [ + "project", + "managedZone", + "dnsKeyId" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. 
An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "digestType": { + "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + "location": "query", + "type": "string" + }, + "dnsKeyId": { + "description": "The identifier of the requested DnsKey.", + "location": "path", + "required": true, + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + "response": { + "$ref": "DnsKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + "httpMethod": "GET", + "id": "dns.dnsKeys.list", + "parameterOrder": [ + "project", + "managedZone" + ], + "parameters": { + "digestType": { + "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "description": "Optional. 
Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/managedZones/{managedZone}/dnsKeys", + "response": { + "$ref": "DnsKeysListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, + "managedZoneOperations": { + "methods": { + "get": { + "description": "Fetch the representation of an existing Operation.", + "httpMethod": "GET", + "id": "dns.managedZoneOperations.get", + "parameterOrder": [ + "project", + "managedZone", + "operation" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "operation": { + "description": "Identifies the operation addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/managedZones/{managedZone}/operations/{operation}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "list": { + "description": "Enumerate Operations for the given ManagedZone.", + "httpMethod": "GET", + "id": "dns.managedZoneOperations.list", + "parameterOrder": [ + "project", + "managedZone" + ], + "parameters": { + "managedZone": { + "description": "Identifies the managed zone addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "maxResults": { + "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "Optional. A tag returned by a previous list request that was truncated. 
Use this parameter to continue a previous list request.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + }, + "sortBy": { + "default": "startTime", + "description": "Sorting criterion. The only supported values are START_TIME and ID.", + "enum": [ + "id", + "startTime" + ], + "enumDescriptions": [ + "", + "" + ], + "location": "query", + "type": "string" + } + }, + "path": "{project}/managedZones/{managedZone}/operations", + "response": { + "$ref": "ManagedZoneOperationsListResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/ndev.clouddns.readonly", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + } + } + }, "managedZones": { "methods": { "create": { @@ -229,6 +449,11 @@ "project" ], "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, "project": { "description": "Identifies the project addressed by this request.", "location": "path", @@ -257,6 +482,11 @@ "managedZone" ], "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, "managedZone": { "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", "location": "path", @@ -285,6 +515,11 @@ "managedZone" ], "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, "managedZone": { "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", "location": "path", @@ -350,6 +585,84 @@ "https://www.googleapis.com/auth/ndev.clouddns.readonly", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" ] + }, + "patch": { + "description": "Update an existing ManagedZone. This method supports patch semantics.", + "httpMethod": "PATCH", + "id": "dns.managedZones.patch", + "parameterOrder": [ + "project", + "managedZone" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/managedZones/{managedZone}", + "request": { + "$ref": "ManagedZone" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] + }, + "update": { + "description": "Update an existing ManagedZone.", + "httpMethod": "PUT", + "id": "dns.managedZones.update", + "parameterOrder": [ + "project", + "managedZone" + ], + "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, + "managedZone": { + "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Identifies the project addressed by this request.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "{project}/managedZones/{managedZone}", + "request": { + "$ref": "ManagedZone" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + ] } } }, @@ -363,6 +676,11 @@ "project" ], "parameters": { + "clientOperationId": { + "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + "location": "query", + "type": "string" + }, "project": { "description": "Identifies the project addressed by this request.", "location": "path", @@ -442,7 +760,7 @@ } } }, - "revision": "20180123", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "Change": { @@ -467,6 +785,10 @@ "description": "Unique identifier for the resource; defined by the server (output only).", "type": "string" }, + "isServing": { + "description": "If the DNS queries for the zone will be served.", + "type": "boolean" + }, "kind": { "default": "dns#change", "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#change\".", @@ -502,6 +824,9 @@ }, "type": "array" }, + "header": { + "$ref": "ResponseHeader" + }, "kind": { "default": "dns#changesListResponse", "description": "Type of resource.", @@ -514,6 +839,182 @@ }, "type": "object" }, + "DnsKey": { + "description": "A DNSSEC key pair.", + "id": "DnsKey", + "properties": { + "algorithm": { + "description": "String mnemonic specifying the DNSSEC algorithm of this key. Immutable after creation time.", + "enum": [ + "ecdsap256sha256", + "ecdsap384sha384", + "rsasha1", + "rsasha256", + "rsasha512" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "creationTime": { + "description": "The time that this resource was created in the control plane. This is in RFC3339 text format. Output only.", + "type": "string" + }, + "description": { + "description": "A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the resource's function.", + "type": "string" + }, + "digests": { + "description": "Cryptographic hashes of the DNSKEY resource record associated with this DnsKey. These digests are needed to construct a DS record that points at this DNS key. Output only.", + "items": { + "$ref": "DnsKeyDigest" + }, + "type": "array" + }, + "id": { + "description": "Unique identifier for the resource; defined by the server (output only).", + "type": "string" + }, + "isActive": { + "description": "Active keys will be used to sign subsequent changes to the ManagedZone. Inactive keys will still be present as DNSKEY Resource Records for the use of resolvers validating existing signatures.", + "type": "boolean" + }, + "keyLength": { + "description": "Length of the key in bits. Specified at creation time then immutable.", + "format": "uint32", + "type": "integer" + }, + "keyTag": { + "description": "The key tag is a non-cryptographic hash of the a DNSKEY resource record associated with this DnsKey. 
The key tag can be used to identify a DNSKEY more quickly (but it is not a unique identifier). In particular, the key tag is used in a parent zone's DS record to point at the DNSKEY in this child ManagedZone. The key tag is a number in the range [0, 65535] and the algorithm to calculate it is specified in RFC4034 Appendix B. Output only.", + "format": "int32", + "type": "integer" + }, + "kind": { + "default": "dns#dnsKey", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#dnsKey\".", + "type": "string" + }, + "publicKey": { + "description": "Base64 encoded public half of this key. Output only.", + "type": "string" + }, + "type": { + "description": "One of \"KEY_SIGNING\" or \"ZONE_SIGNING\". Keys of type KEY_SIGNING have the Secure Entry Point flag set and, when active, will be used to sign only resource record sets of type DNSKEY. Otherwise, the Secure Entry Point flag will be cleared and this key will be used to sign only resource record sets of other types. Immutable after creation time.", + "enum": [ + "keySigning", + "zoneSigning" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "DnsKeyDigest": { + "id": "DnsKeyDigest", + "properties": { + "digest": { + "description": "The base-16 encoded bytes of this digest. Suitable for use in a DS resource record.", + "type": "string" + }, + "type": { + "description": "Specifies the algorithm used to calculate this digest.", + "enum": [ + "sha1", + "sha256", + "sha384" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "DnsKeySpec": { + "description": "Parameters for DnsKey key generation. 
Used for generating initial keys for a new ManagedZone and as default when adding a new DnsKey.", + "id": "DnsKeySpec", + "properties": { + "algorithm": { + "description": "String mnemonic specifying the DNSSEC algorithm of this key.", + "enum": [ + "ecdsap256sha256", + "ecdsap384sha384", + "rsasha1", + "rsasha256", + "rsasha512" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "keyLength": { + "description": "Length of the keys in bits.", + "format": "uint32", + "type": "integer" + }, + "keyType": { + "description": "One of \"KEY_SIGNING\" or \"ZONE_SIGNING\". Keys of type KEY_SIGNING have the Secure Entry Point flag set and, when active, will be used to sign only resource record sets of type DNSKEY. Otherwise, the Secure Entry Point flag will be cleared and this key will be used to sign only resource record sets of other types.", + "enum": [ + "keySigning", + "zoneSigning" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "kind": { + "default": "dns#dnsKeySpec", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#dnsKeySpec\".", + "type": "string" + } + }, + "type": "object" + }, + "DnsKeysListResponse": { + "description": "The response to a request to enumerate DnsKeys in a ManagedZone.", + "id": "DnsKeysListResponse", + "properties": { + "dnsKeys": { + "description": "The requested resources.", + "items": { + "$ref": "DnsKey" + }, + "type": "array" + }, + "header": { + "$ref": "ResponseHeader" + }, + "kind": { + "default": "dns#dnsKeysListResponse", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your pagination token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. 
However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. There is no way to retrieve a \"snapshot\" of collections larger than the maximum page size.", + "type": "string" + } + }, + "type": "object" + }, "ManagedZone": { "description": "A zone is a subtree of the DNS namespace under one administrative responsibility. A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service.", "id": "ManagedZone", @@ -530,6 +1031,10 @@ "description": "The DNS name of this managed zone, for instance \"example.com.\".", "type": "string" }, + "dnssecConfig": { + "$ref": "ManagedZoneDnsSecConfig", + "description": "DNSSEC configuration." + }, "id": { "description": "Unique identifier for the resource; defined by the server (output only)", "format": "uint64", @@ -540,6 +1045,13 @@ "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "User labels.", + "type": "object" + }, "name": { "description": "User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.", "type": "string" @@ -558,9 +1070,81 @@ }, "type": "object" }, + "ManagedZoneDnsSecConfig": { + "id": "ManagedZoneDnsSecConfig", + "properties": { + "defaultKeySpecs": { + "description": "Specifies parameters that will be used for generating initial DnsKeys for this ManagedZone. Output only while state is not OFF.", + "items": { + "$ref": "DnsKeySpec" + }, + "type": "array" + }, + "kind": { + "default": "dns#managedZoneDnsSecConfig", + "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#managedZoneDnsSecConfig\".", + "type": "string" + }, + "nonExistence": { + "description": "Specifies the mechanism used to provide authenticated denial-of-existence responses. Output only while state is not OFF.", + "enum": [ + "nsec", + "nsec3" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "state": { + "description": "Specifies whether DNSSEC is enabled, and what mode it is in.", + "enum": [ + "off", + "on", + "transfer" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "ManagedZoneOperationsListResponse": { + "id": "ManagedZoneOperationsListResponse", + "properties": { + "header": { + "$ref": "ResponseHeader" + }, + "kind": { + "default": "dns#managedZoneOperationsListResponse", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "The presence of this field indicates that there exist more results following your last page of results in pagination order. To fetch them, make another list request using this value as your page token.\n\nIn this way you can retrieve the complete contents of even very large collections one page at a time. However, if the contents of the collection change between the first and last paginated list request, the set of all elements returned will be an inconsistent view of the collection. 
There is no way to retrieve a consistent snapshot of a collection larger than the maximum page size.", + "type": "string" + }, + "operations": { + "description": "The operation resources.", + "items": { + "$ref": "Operation" + }, + "type": "array" + } + }, + "type": "object" + }, "ManagedZonesListResponse": { "id": "ManagedZonesListResponse", "properties": { + "header": { + "$ref": "ResponseHeader" + }, "kind": { "default": "dns#managedZonesListResponse", "description": "Type of resource.", @@ -580,6 +1164,82 @@ }, "type": "object" }, + "Operation": { + "description": "An operation represents a successful mutation performed on a Cloud DNS resource. Operations provide: - An audit log of server resource mutations. - A way to recover/retry API calls in the case where the response is never received by the caller. Use the caller specified client_operation_id.", + "id": "Operation", + "properties": { + "dnsKeyContext": { + "$ref": "OperationDnsKeyContext", + "description": "Only populated if the operation targeted a DnsKey (output only)." + }, + "id": { + "description": "Unique identifier for the resource. This is the client_operation_id if the client specified it when the mutation was initiated, otherwise, it is generated by the server. The name must be 1-63 characters long and match the regular expression [-a-z0-9]? (output only)", + "type": "string" + }, + "kind": { + "default": "dns#operation", + "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#operation\".", + "type": "string" + }, + "startTime": { + "description": "The time that this operation was started by the server. This is in RFC3339 text format (output only).", + "type": "string" + }, + "status": { + "description": "Status of the operation. Can be one of the following: \"PENDING\" or \"DONE\" (output only).", + "enum": [ + "done", + "pending" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "type": { + "description": "Type of the operation. 
Operations include insert, update, and delete (output only).", + "type": "string" + }, + "user": { + "description": "User who requested the operation, for example: user@example.com. cloud-dns-system for operations automatically done by the system. (output only)", + "type": "string" + }, + "zoneContext": { + "$ref": "OperationManagedZoneContext", + "description": "Only populated if the operation targeted a ManagedZone (output only)." + } + }, + "type": "object" + }, + "OperationDnsKeyContext": { + "id": "OperationDnsKeyContext", + "properties": { + "newValue": { + "$ref": "DnsKey", + "description": "The post-operation DnsKey resource." + }, + "oldValue": { + "$ref": "DnsKey", + "description": "The pre-operation DnsKey resource." + } + }, + "type": "object" + }, + "OperationManagedZoneContext": { + "id": "OperationManagedZoneContext", + "properties": { + "newValue": { + "$ref": "ManagedZone", + "description": "The post-operation ManagedZone resource." + }, + "oldValue": { + "$ref": "ManagedZone", + "description": "The pre-operation ManagedZone resource." + } + }, + "type": "object" + }, "Project": { "description": "A project resource. The project is a top level container for resources including Cloud DNS ManagedZones. Projects can be created only in the APIs console.", "id": "Project", @@ -609,6 +1269,11 @@ "description": "Limits associated with a Project.", "id": "Quota", "properties": { + "dnsKeysPerManagedZone": { + "description": "Maximum allowed number of DnsKeys per ManagedZone.", + "format": "int32", + "type": "integer" + }, "kind": { "default": "dns#quota", "description": "Identifies what kind of resource this is. 
Value: the fixed string \"dns#quota\".", @@ -643,6 +1308,13 @@ "description": "Maximum allowed size for total rrdata in one ChangesCreateRequest in bytes.", "format": "int32", "type": "integer" + }, + "whitelistedKeySpecs": { + "description": "DNSSEC algorithm and key length types that can be used for DnsKeys.", + "items": { + "$ref": "DnsKeySpec" + }, + "type": "array" } }, "type": "object" @@ -667,6 +1339,13 @@ }, "type": "array" }, + "signatureRrdatas": { + "description": "As defined in RFC 4034 (section 3.2).", + "items": { + "type": "string" + }, + "type": "array" + }, "ttl": { "description": "Number of seconds that this ResourceRecordSet can be cached by resolvers.", "format": "int32", @@ -682,6 +1361,9 @@ "ResourceRecordSetsListResponse": { "id": "ResourceRecordSetsListResponse", "properties": { + "header": { + "$ref": "ResponseHeader" + }, "kind": { "default": "dns#resourceRecordSetsListResponse", "description": "Type of resource.", @@ -700,6 +1382,17 @@ } }, "type": "object" + }, + "ResponseHeader": { + "description": "Elements common to every response.", + "id": "ResponseHeader", + "properties": { + "operationId": { + "description": "For mutating operation requests that completed successfully. 
This is the client_operation_id if the client specified it, otherwise it is generated by the server (output only).", + "type": "string" + } + }, + "type": "object" } }, "servicePath": "dns/v1/projects/", diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 491f8d91a..559f66983 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -66,6 +66,8 @@ func New(client *http.Client) (*Service, error) { } s := &Service{client: client, BasePath: basePath} s.Changes = NewChangesService(s) + s.DnsKeys = NewDnsKeysService(s) + s.ManagedZoneOperations = NewManagedZoneOperationsService(s) s.ManagedZones = NewManagedZonesService(s) s.Projects = NewProjectsService(s) s.ResourceRecordSets = NewResourceRecordSetsService(s) @@ -79,6 +81,10 @@ type Service struct { Changes *ChangesService + DnsKeys *DnsKeysService + + ManagedZoneOperations *ManagedZoneOperationsService + ManagedZones *ManagedZonesService Projects *ProjectsService @@ -102,6 +108,24 @@ type ChangesService struct { s *Service } +func NewDnsKeysService(s *Service) *DnsKeysService { + rs := &DnsKeysService{s: s} + return rs +} + +type DnsKeysService struct { + s *Service +} + +func NewManagedZoneOperationsService(s *Service) *ManagedZoneOperationsService { + rs := &ManagedZoneOperationsService{s: s} + return rs +} + +type ManagedZoneOperationsService struct { + s *Service +} + func NewManagedZonesService(s *Service) *ManagedZonesService { rs := &ManagedZonesService{s: s} return rs @@ -142,6 +166,9 @@ type Change struct { // only). Id string `json:"id,omitempty"` + // IsServing: If the DNS queries for the zone will be served. + IsServing bool `json:"isServing,omitempty"` + // Kind: Identifies what kind of resource this is. Value: the fixed // string "dns#change". Kind string `json:"kind,omitempty"` @@ -190,6 +217,8 @@ type ChangesListResponse struct { // Changes: The requested changes. 
Changes []*Change `json:"changes,omitempty"` + Header *ResponseHeader `json:"header,omitempty"` + // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -233,6 +262,244 @@ func (s *ChangesListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DnsKey: A DNSSEC key pair. +type DnsKey struct { + // Algorithm: String mnemonic specifying the DNSSEC algorithm of this + // key. Immutable after creation time. + // + // Possible values: + // "ecdsap256sha256" + // "ecdsap384sha384" + // "rsasha1" + // "rsasha256" + // "rsasha512" + Algorithm string `json:"algorithm,omitempty"` + + // CreationTime: The time that this resource was created in the control + // plane. This is in RFC3339 text format. Output only. + CreationTime string `json:"creationTime,omitempty"` + + // Description: A mutable string of at most 1024 characters associated + // with this resource for the user's convenience. Has no effect on the + // resource's function. + Description string `json:"description,omitempty"` + + // Digests: Cryptographic hashes of the DNSKEY resource record + // associated with this DnsKey. These digests are needed to construct a + // DS record that points at this DNS key. Output only. + Digests []*DnsKeyDigest `json:"digests,omitempty"` + + // Id: Unique identifier for the resource; defined by the server (output + // only). + Id string `json:"id,omitempty"` + + // IsActive: Active keys will be used to sign subsequent changes to the + // ManagedZone. Inactive keys will still be present as DNSKEY Resource + // Records for the use of resolvers validating existing signatures. + IsActive bool `json:"isActive,omitempty"` + + // KeyLength: Length of the key in bits. Specified at creation time then + // immutable. + KeyLength int64 `json:"keyLength,omitempty"` + + // KeyTag: The key tag is a non-cryptographic hash of the a DNSKEY + // resource record associated with this DnsKey. 
The key tag can be used + // to identify a DNSKEY more quickly (but it is not a unique + // identifier). In particular, the key tag is used in a parent zone's DS + // record to point at the DNSKEY in this child ManagedZone. The key tag + // is a number in the range [0, 65535] and the algorithm to calculate it + // is specified in RFC4034 Appendix B. Output only. + KeyTag int64 `json:"keyTag,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#dnsKey". + Kind string `json:"kind,omitempty"` + + // PublicKey: Base64 encoded public half of this key. Output only. + PublicKey string `json:"publicKey,omitempty"` + + // Type: One of "KEY_SIGNING" or "ZONE_SIGNING". Keys of type + // KEY_SIGNING have the Secure Entry Point flag set and, when active, + // will be used to sign only resource record sets of type DNSKEY. + // Otherwise, the Secure Entry Point flag will be cleared and this key + // will be used to sign only resource record sets of other types. + // Immutable after creation time. + // + // Possible values: + // "keySigning" + // "zoneSigning" + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Algorithm") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Algorithm") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DnsKey) MarshalJSON() ([]byte, error) { + type NoMethod DnsKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DnsKeyDigest struct { + // Digest: The base-16 encoded bytes of this digest. Suitable for use in + // a DS resource record. + Digest string `json:"digest,omitempty"` + + // Type: Specifies the algorithm used to calculate this digest. + // + // Possible values: + // "sha1" + // "sha256" + // "sha384" + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Digest") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Digest") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DnsKeyDigest) MarshalJSON() ([]byte, error) { + type NoMethod DnsKeyDigest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DnsKeySpec: Parameters for DnsKey key generation. 
Used for generating +// initial keys for a new ManagedZone and as default when adding a new +// DnsKey. +type DnsKeySpec struct { + // Algorithm: String mnemonic specifying the DNSSEC algorithm of this + // key. + // + // Possible values: + // "ecdsap256sha256" + // "ecdsap384sha384" + // "rsasha1" + // "rsasha256" + // "rsasha512" + Algorithm string `json:"algorithm,omitempty"` + + // KeyLength: Length of the keys in bits. + KeyLength int64 `json:"keyLength,omitempty"` + + // KeyType: One of "KEY_SIGNING" or "ZONE_SIGNING". Keys of type + // KEY_SIGNING have the Secure Entry Point flag set and, when active, + // will be used to sign only resource record sets of type DNSKEY. + // Otherwise, the Secure Entry Point flag will be cleared and this key + // will be used to sign only resource record sets of other types. + // + // Possible values: + // "keySigning" + // "zoneSigning" + KeyType string `json:"keyType,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#dnsKeySpec". + Kind string `json:"kind,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Algorithm") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Algorithm") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *DnsKeySpec) MarshalJSON() ([]byte, error) { + type NoMethod DnsKeySpec + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// DnsKeysListResponse: The response to a request to enumerate DnsKeys +// in a ManagedZone. +type DnsKeysListResponse struct { + // DnsKeys: The requested resources. + DnsKeys []*DnsKey `json:"dnsKeys,omitempty"` + + Header *ResponseHeader `json:"header,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. + // To fetch them, make another list request using this value as your + // pagination token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a "snapshot" of collections + // larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DnsKeys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DnsKeys") to include in + // API requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DnsKeysListResponse) MarshalJSON() ([]byte, error) { + type NoMethod DnsKeysListResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ManagedZone: A zone is a subtree of the DNS namespace under one // administrative responsibility. A ManagedZone is a resource that // represents a DNS zone hosted by the Cloud DNS service. @@ -250,6 +517,9 @@ type ManagedZone struct { // "example.com.". DnsName string `json:"dnsName,omitempty"` + // DnssecConfig: DNSSEC configuration. + DnssecConfig *ManagedZoneDnsSecConfig `json:"dnssecConfig,omitempty"` + // Id: Unique identifier for the resource; defined by the server (output // only) Id uint64 `json:"id,omitempty,string"` @@ -258,6 +528,9 @@ type ManagedZone struct { // string "dns#managedZone". Kind string `json:"kind,omitempty"` + // Labels: User labels. + Labels map[string]string `json:"labels,omitempty"` + // Name: User assigned name for this resource. Must be unique within the // project. The name must be 1-63 characters long, must begin with a // letter, end with a letter or digit, and only contain lowercase @@ -300,7 +573,108 @@ func (s *ManagedZone) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedZoneDnsSecConfig struct { + // DefaultKeySpecs: Specifies parameters that will be used for + // generating initial DnsKeys for this ManagedZone. Output only while + // state is not OFF. + DefaultKeySpecs []*DnsKeySpec `json:"defaultKeySpecs,omitempty"` + + // Kind: Identifies what kind of resource this is. 
Value: the fixed + // string "dns#managedZoneDnsSecConfig". + Kind string `json:"kind,omitempty"` + + // NonExistence: Specifies the mechanism used to provide authenticated + // denial-of-existence responses. Output only while state is not OFF. + // + // Possible values: + // "nsec" + // "nsec3" + NonExistence string `json:"nonExistence,omitempty"` + + // State: Specifies whether DNSSEC is enabled, and what mode it is in. + // + // Possible values: + // "off" + // "on" + // "transfer" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultKeySpecs") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultKeySpecs") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ManagedZoneDnsSecConfig) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZoneDnsSecConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ManagedZoneOperationsListResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The presence of this field indicates that there exist + // more results following your last page of results in pagination order. 
+ // To fetch them, make another list request using this value as your + // page token. + // + // In this way you can retrieve the complete contents of even very large + // collections one page at a time. However, if the contents of the + // collection change between the first and last paginated list request, + // the set of all elements returned will be an inconsistent view of the + // collection. There is no way to retrieve a consistent snapshot of a + // collection larger than the maximum page size. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: The operation resources. + Operations []*Operation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Header") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Header") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { + type NoMethod ManagedZoneOperationsListResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedZonesListResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -324,7 +698,7 @@ type ManagedZonesListResponse struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Header") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -332,7 +706,7 @@ type ManagedZonesListResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Header") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -347,6 +721,139 @@ func (s *ManagedZonesListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Operation: An operation represents a successful mutation performed on +// a Cloud DNS resource. Operations provide: - An audit log of server +// resource mutations. - A way to recover/retry API calls in the case +// where the response is never received by the caller. Use the caller +// specified client_operation_id. +type Operation struct { + // DnsKeyContext: Only populated if the operation targeted a DnsKey + // (output only). 
+ DnsKeyContext *OperationDnsKeyContext `json:"dnsKeyContext,omitempty"` + + // Id: Unique identifier for the resource. This is the + // client_operation_id if the client specified it when the mutation was + // initiated, otherwise, it is generated by the server. The name must be + // 1-63 characters long and match the regular expression [-a-z0-9]? + // (output only) + Id string `json:"id,omitempty"` + + // Kind: Identifies what kind of resource this is. Value: the fixed + // string "dns#operation". + Kind string `json:"kind,omitempty"` + + // StartTime: The time that this operation was started by the server. + // This is in RFC3339 text format (output only). + StartTime string `json:"startTime,omitempty"` + + // Status: Status of the operation. Can be one of the following: + // "PENDING" or "DONE" (output only). + // + // Possible values: + // "done" + // "pending" + Status string `json:"status,omitempty"` + + // Type: Type of the operation. Operations include insert, update, and + // delete (output only). + Type string `json:"type,omitempty"` + + // User: User who requested the operation, for example: + // user@example.com. cloud-dns-system for operations automatically done + // by the system. (output only) + User string `json:"user,omitempty"` + + // ZoneContext: Only populated if the operation targeted a ManagedZone + // (output only). + ZoneContext *OperationManagedZoneContext `json:"zoneContext,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "DnsKeyContext") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DnsKeyContext") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Operation) MarshalJSON() ([]byte, error) { + type NoMethod Operation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationDnsKeyContext struct { + // NewValue: The post-operation DnsKey resource. + NewValue *DnsKey `json:"newValue,omitempty"` + + // OldValue: The pre-operation DnsKey resource. + OldValue *DnsKey `json:"oldValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NewValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NewValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *OperationDnsKeyContext) MarshalJSON() ([]byte, error) { + type NoMethod OperationDnsKeyContext + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type OperationManagedZoneContext struct { + // NewValue: The post-operation ManagedZone resource. + NewValue *ManagedZone `json:"newValue,omitempty"` + + // OldValue: The pre-operation ManagedZone resource. + OldValue *ManagedZone `json:"oldValue,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NewValue") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NewValue") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *OperationManagedZoneContext) MarshalJSON() ([]byte, error) { + type NoMethod OperationManagedZoneContext + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Project: A project resource. The project is a top level container for // resources including Cloud DNS ManagedZones. Projects can be created // only in the APIs console. @@ -394,6 +901,10 @@ func (s *Project) MarshalJSON() ([]byte, error) { // Quota: Limits associated with a Project. 
type Quota struct { + // DnsKeysPerManagedZone: Maximum allowed number of DnsKeys per + // ManagedZone. + DnsKeysPerManagedZone int64 `json:"dnsKeysPerManagedZone,omitempty"` + // Kind: Identifies what kind of resource this is. Value: the fixed // string "dns#quota". Kind string `json:"kind,omitempty"` @@ -421,20 +932,26 @@ type Quota struct { // one ChangesCreateRequest in bytes. TotalRrdataSizePerChange int64 `json:"totalRrdataSizePerChange,omitempty"` - // ForceSendFields is a list of field names (e.g. "Kind") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // WhitelistedKeySpecs: DNSSEC algorithm and key length types that can + // be used for DnsKeys. + WhitelistedKeySpecs []*DnsKeySpec `json:"whitelistedKeySpecs,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "DnsKeysPerManagedZone") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. 
"DnsKeysPerManagedZone") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -458,6 +975,9 @@ type ResourceRecordSet struct { // 3.6.1). Rrdatas []string `json:"rrdatas,omitempty"` + // SignatureRrdatas: As defined in RFC 4034 (section 3.2). + SignatureRrdatas []string `json:"signatureRrdatas,omitempty"` + // Ttl: Number of seconds that this ResourceRecordSet can be cached by // resolvers. Ttl int64 `json:"ttl,omitempty"` @@ -490,6 +1010,8 @@ func (s *ResourceRecordSet) MarshalJSON() ([]byte, error) { } type ResourceRecordSetsListResponse struct { + Header *ResponseHeader `json:"header,omitempty"` + // Kind: Type of resource. Kind string `json:"kind,omitempty"` @@ -513,7 +1035,7 @@ type ResourceRecordSetsListResponse struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Kind") to + // ForceSendFields is a list of field names (e.g. "Header") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -521,7 +1043,7 @@ type ResourceRecordSetsListResponse struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Kind") to include in API + // NullFields is a list of field names (e.g. "Header") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -536,6 +1058,36 @@ func (s *ResourceRecordSetsListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ResponseHeader: Elements common to every response. +type ResponseHeader struct { + // OperationId: For mutating operation requests that completed + // successfully. This is the client_operation_id if the client specified + // it, otherwise it is generated by the server (output only). + OperationId string `json:"operationId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "OperationId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "OperationId") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ResponseHeader) MarshalJSON() ([]byte, error) { + type NoMethod ResponseHeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // method id "dns.changes.create": type ChangesCreateCall struct { @@ -557,6 +1109,15 @@ func (r *ChangesService) Create(project string, managedZone string, change *Chan return c } +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ChangesCreateCall) ClientOperationId(clientOperationId string) *ChangesCreateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -652,6 +1213,11 @@ func (c *ChangesCreateCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "managedZone" // ], // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, // "managedZone": { // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", // "location": "path", @@ -702,6 +1268,15 @@ func (r *ChangesService) Get(project string, managedZone string, changeId string return c } +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. 
+func (c *ChangesGetCall) ClientOperationId(clientOperationId string) *ChangesGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -813,6 +1388,11 @@ func (c *ChangesGetCall) Do(opts ...googleapi.CallOption) (*Change, error) { // "required": true, // "type": "string" // }, + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, // "managedZone": { // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", // "location": "path", @@ -1072,6 +1652,804 @@ func (c *ChangesListCall) Pages(ctx context.Context, f func(*ChangesListResponse } } +// method id "dns.dnsKeys.get": + +type DnsKeysGetCall struct { + s *Service + project string + managedZone string + dnsKeyId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetch the representation of an existing DnsKey. +func (r *DnsKeysService) Get(project string, managedZone string, dnsKeyId string) *DnsKeysGetCall { + c := &DnsKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.dnsKeyId = dnsKeyId + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. 
+func (c *DnsKeysGetCall) ClientOperationId(clientOperationId string) *DnsKeysGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// DigestType sets the optional parameter "digestType": An optional +// comma-separated list of digest types to compute and display for key +// signing keys. If omitted, the recommended digest type will be +// computed and displayed. +func (c *DnsKeysGetCall) DigestType(digestType string) *DnsKeysGetCall { + c.urlParams_.Set("digestType", digestType) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DnsKeysGetCall) Fields(s ...googleapi.Field) *DnsKeysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DnsKeysGetCall) IfNoneMatch(entityTag string) *DnsKeysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DnsKeysGetCall) Context(ctx context.Context) *DnsKeysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DnsKeysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DnsKeysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "dnsKeyId": c.dnsKeyId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.dnsKeys.get" call. +// Exactly one of *DnsKey or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *DnsKey.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *DnsKeysGetCall) Do(opts ...googleapi.CallOption) (*DnsKey, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DnsKey{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing DnsKey.", + // "httpMethod": "GET", + // "id": "dns.dnsKeys.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "dnsKeyId" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "digestType": { + // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + // "location": "query", + // "type": "string" + // }, + // "dnsKeyId": { + // "description": "The identifier of the requested DnsKey.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/dnsKeys/{dnsKeyId}", + // "response": { + // "$ref": "DnsKey" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.dnsKeys.list": + +type DnsKeysListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerate DnsKeys to a ResourceRecordSet collection. +func (r *DnsKeysService) List(project string, managedZone string) *DnsKeysListCall { + c := &DnsKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// DigestType sets the optional parameter "digestType": An optional +// comma-separated list of digest types to compute and display for key +// signing keys. If omitted, the recommended digest type will be +// computed and displayed. +func (c *DnsKeysListCall) DigestType(digestType string) *DnsKeysListCall { + c.urlParams_.Set("digestType", digestType) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. 
+func (c *DnsKeysListCall) MaxResults(maxResults int64) *DnsKeysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *DnsKeysListCall) PageToken(pageToken string) *DnsKeysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DnsKeysListCall) Fields(s ...googleapi.Field) *DnsKeysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *DnsKeysListCall) IfNoneMatch(entityTag string) *DnsKeysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DnsKeysListCall) Context(ctx context.Context) *DnsKeysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DnsKeysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DnsKeysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/dnsKeys") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.dnsKeys.list" call. +// Exactly one of *DnsKeysListResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *DnsKeysListResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DnsKeysListCall) Do(opts ...googleapi.CallOption) (*DnsKeysListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &DnsKeysListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerate DnsKeys to a ResourceRecordSet collection.", + // "httpMethod": "GET", + // "id": "dns.dnsKeys.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "digestType": { + // "description": "An optional comma-separated list of digest types to compute and display for key signing keys. If omitted, the recommended digest type will be computed and displayed.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. 
Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/dnsKeys", + // "response": { + // "$ref": "DnsKeysListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *DnsKeysListCall) Pages(ctx context.Context, f func(*DnsKeysListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "dns.managedZoneOperations.get": + +type ManagedZoneOperationsGetCall struct { + s *Service + project string + managedZone string + operation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Fetch the representation of an existing Operation. 
+func (r *ManagedZoneOperationsService) Get(project string, managedZone string, operation string) *ManagedZoneOperationsGetCall { + c := &ManagedZoneOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.operation = operation + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZoneOperationsGetCall) ClientOperationId(clientOperationId string) *ManagedZoneOperationsGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZoneOperationsGetCall) Fields(s ...googleapi.Field) *ManagedZoneOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZoneOperationsGetCall) IfNoneMatch(entityTag string) *ManagedZoneOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZoneOperationsGetCall) Context(ctx context.Context) *ManagedZoneOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ManagedZoneOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations/{operation}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + "operation": c.operation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZoneOperations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Fetch the representation of an existing Operation.", + // "httpMethod": "GET", + // "id": "dns.managedZoneOperations.get", + // "parameterOrder": [ + // "project", + // "managedZone", + // "operation" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "operation": { + // "description": "Identifies the operation addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/operations/{operation}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZoneOperations.list": + +type ManagedZoneOperationsListCall struct { + s *Service + project string + managedZone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Enumerate Operations for the given ManagedZone. +func (r *ManagedZoneOperationsService) List(project string, managedZone string) *ManagedZoneOperationsListCall { + c := &ManagedZoneOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of results to be returned. If unspecified, the server will decide how +// many results to return. 
+func (c *ManagedZoneOperationsListCall) MaxResults(maxResults int64) *ManagedZoneOperationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A tag returned by +// a previous list request that was truncated. Use this parameter to +// continue a previous list request. +func (c *ManagedZoneOperationsListCall) PageToken(pageToken string) *ManagedZoneOperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// SortBy sets the optional parameter "sortBy": Sorting criterion. The +// only supported values are START_TIME and ID. +// +// Possible values: +// "id" +// "startTime" (default) +func (c *ManagedZoneOperationsListCall) SortBy(sortBy string) *ManagedZoneOperationsListCall { + c.urlParams_.Set("sortBy", sortBy) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZoneOperationsListCall) Fields(s ...googleapi.Field) *ManagedZoneOperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedZoneOperationsListCall) IfNoneMatch(entityTag string) *ManagedZoneOperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ManagedZoneOperationsListCall) Context(ctx context.Context) *ManagedZoneOperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZoneOperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}/operations") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZoneOperations.list" call. +// Exactly one of *ManagedZoneOperationsListResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ManagedZoneOperationsListResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *ManagedZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*ManagedZoneOperationsListResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ManagedZoneOperationsListResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Enumerate Operations for the given ManagedZone.", + // "httpMethod": "GET", + // "id": "dns.managedZoneOperations.list", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "description": "Optional. Maximum number of results to be returned. If unspecified, the server will decide how many results to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Optional. A tag returned by a previous list request that was truncated. Use this parameter to continue a previous list request.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sortBy": { + // "default": "startTime", + // "description": "Sorting criterion. 
The only supported values are START_TIME and ID.", + // "enum": [ + // "id", + // "startTime" + // ], + // "enumDescriptions": [ + // "", + // "" + // ], + // "location": "query", + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}/operations", + // "response": { + // "$ref": "ManagedZoneOperationsListResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/ndev.clouddns.readonly", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ManagedZoneOperationsListCall) Pages(ctx context.Context, f func(*ManagedZoneOperationsListResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "dns.managedZones.create": type ManagedZonesCreateCall struct { @@ -1091,6 +2469,15 @@ func (r *ManagedZonesService) Create(project string, managedzone *ManagedZone) * return c } +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesCreateCall) ClientOperationId(clientOperationId string) *ManagedZonesCreateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1184,6 +2571,11 @@ func (c *ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (*ManagedZone, // "project" // ], // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Identifies the project addressed by this request.", // "location": "path", @@ -1225,6 +2617,15 @@ func (r *ManagedZonesService) Delete(project string, managedZone string) *Manage return c } +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesDeleteCall) ClientOperationId(clientOperationId string) *ManagedZonesDeleteCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1290,6 +2691,11 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { // "managedZone" // ], // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, // "managedZone": { // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", // "location": "path", @@ -1332,6 +2738,15 @@ func (r *ManagedZonesService) Get(project string, managedZone string) *ManagedZo return c } +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesGetCall) ClientOperationId(clientOperationId string) *ManagedZonesGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1435,6 +2850,11 @@ func (c *ManagedZonesGetCall) Do(opts ...googleapi.CallOption) (*ManagedZone, er // "managedZone" // ], // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, // "managedZone": { // "description": "Identifies the managed zone addressed by this request. Can be the managed zone name or id.", // "location": "path", @@ -1663,6 +3083,323 @@ func (c *ManagedZonesListCall) Pages(ctx context.Context, f func(*ManagedZonesLi } } +// method id "dns.managedZones.patch": + +type ManagedZonesPatchCall struct { + s *Service + project string + managedZone string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Update an existing ManagedZone. This method supports patch +// semantics. 
+func (r *ManagedZonesService) Patch(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesPatchCall { + c := &ManagedZonesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.managedzone = managedzone + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesPatchCall) ClientOperationId(clientOperationId string) *ManagedZonesPatchCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesPatchCall) Fields(s ...googleapi.Field) *ManagedZonesPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesPatchCall) Context(ctx context.Context) *ManagedZonesPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ManagedZonesPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update an existing ManagedZone. This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "dns.managedZones.patch", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}", + // "request": { + // "$ref": "ManagedZone" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + +// method id "dns.managedZones.update": + +type ManagedZonesUpdateCall struct { + s *Service + project string + managedZone string + managedzone *ManagedZone + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Update an existing ManagedZone. +func (r *ManagedZonesService) Update(project string, managedZone string, managedzone *ManagedZone) *ManagedZonesUpdateCall { + c := &ManagedZonesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.managedZone = managedZone + c.managedzone = managedzone + return c +} + +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ManagedZonesUpdateCall) ClientOperationId(clientOperationId string) *ManagedZonesUpdateCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedZonesUpdateCall) Fields(s ...googleapi.Field) *ManagedZonesUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedZonesUpdateCall) Context(ctx context.Context) *ManagedZonesUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedZonesUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedZonesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedzone) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/managedZones/{managedZone}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "managedZone": c.managedZone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "dns.managedZones.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ManagedZonesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Update an existing ManagedZone.", + // "httpMethod": "PUT", + // "id": "dns.managedZones.update", + // "parameterOrder": [ + // "project", + // "managedZone" + // ], + // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, + // "managedZone": { + // "description": "Identifies the managed zone addressed by this request. 
Can be the managed zone name or id.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Identifies the project addressed by this request.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/managedZones/{managedZone}", + // "request": { + // "$ref": "ManagedZone" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" + // ] + // } + +} + // method id "dns.projects.get": type ProjectsGetCall struct { @@ -1681,6 +3418,15 @@ func (r *ProjectsService) Get(project string) *ProjectsGetCall { return c } +// ClientOperationId sets the optional parameter "clientOperationId": +// For mutating operation requests only. An optional identifier +// specified by the client. Must be unique for operation resources in +// the Operations collection. +func (c *ProjectsGetCall) ClientOperationId(clientOperationId string) *ProjectsGetCall { + c.urlParams_.Set("clientOperationId", clientOperationId) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -1782,6 +3528,11 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { // "project" // ], // "parameters": { + // "clientOperationId": { + // "description": "For mutating operation requests only. An optional identifier specified by the client. 
Must be unique for operation resources in the Operations collection.", + // "location": "query", + // "type": "string" + // }, // "project": { // "description": "Identifies the project addressed by this request.", // "location": "path", diff --git a/vendor/google.golang.org/api/dns/v1beta2/dns-api.json b/vendor/google.golang.org/api/dns/v1beta2/dns-api.json index e5ca65d99..e79240a18 100644 --- a/vendor/google.golang.org/api/dns/v1beta2/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1beta2/dns-api.json @@ -23,7 +23,7 @@ "description": "Configures and serves authoritative DNS records.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/cloud-dns", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/SuyHwKhP-B9n7DrZc4jJuDANomI\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/e5F2mTs50sJnLnMkzFkqjWW6G1k\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -501,9 +501,6 @@ } }, "path": "{project}/managedZones/{managedZone}", - "response": { - "$ref": "ManagedZonesDeleteResponse" - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -763,7 +760,7 @@ } } }, - "revision": "20180123", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "Change": { @@ -1142,15 +1139,6 @@ }, "type": "object" }, - "ManagedZonesDeleteResponse": { - "id": "ManagedZonesDeleteResponse", - "properties": { - "header": { - "$ref": "ResponseHeader" - } - }, - "type": "object" - }, "ManagedZonesListResponse": { "id": "ManagedZonesListResponse", "properties": { diff --git a/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go b/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go index fcb59202a..f0f359dbe 100644 --- a/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1beta2/dns-gen.go @@ -672,36 +672,6 @@ func (s 
*ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedZonesDeleteResponse struct { - Header *ResponseHeader `json:"header,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Header") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Header") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *ManagedZonesDeleteResponse) MarshalJSON() ([]byte, error) { - type NoMethod ManagedZonesDeleteResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - type ManagedZonesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -2701,42 +2671,17 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "dns.managedZones.delete" call. -// Exactly one of *ManagedZonesDeleteResponse or error will be non-nil. -// Any non-2xx status code is an error. 
Response headers are in either -// *ManagedZonesDeleteResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) (*ManagedZonesDeleteResponse, error) { +func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return err } - ret := &ManagedZonesDeleteResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil + return nil // { // "description": "Delete a previously created ManagedZone.", // "httpMethod": "DELETE", @@ -2765,9 +2710,6 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) (*ManagedZones // } // }, // "path": "{project}/managedZones/{managedZone}", - // "response": { - // "$ref": "ManagedZonesDeleteResponse" - // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" diff --git a/vendor/google.golang.org/api/dns/v2beta1/dns-api.json b/vendor/google.golang.org/api/dns/v2beta1/dns-api.json index 261f37c9c..cd19f14a8 100644 --- a/vendor/google.golang.org/api/dns/v2beta1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v2beta1/dns-api.json @@ -23,7 +23,7 @@ "description": "Configures and serves authoritative DNS 
records.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/cloud-dns", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/A5BDKbKQemrrtwPxFAsqazMm3yE\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/LxBnCwWxzMjcp95c1imUQ5t4wRU\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -501,9 +501,6 @@ } }, "path": "{project}/managedZones/{managedZone}", - "response": { - "$ref": "ManagedZonesDeleteResponse" - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite" @@ -763,7 +760,7 @@ } } }, - "revision": "20180123", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "Change": { @@ -1048,6 +1045,13 @@ "description": "Identifies what kind of resource this is. Value: the fixed string \"dns#managedZone\".", "type": "string" }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "User labels.", + "type": "object" + }, "name": { "description": "User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.", "type": "string" @@ -1135,15 +1139,6 @@ }, "type": "object" }, - "ManagedZonesDeleteResponse": { - "id": "ManagedZonesDeleteResponse", - "properties": { - "header": { - "$ref": "ResponseHeader" - } - }, - "type": "object" - }, "ManagedZonesListResponse": { "id": "ManagedZonesListResponse", "properties": { diff --git a/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go b/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go index e694a0c93..c0cc2a61b 100644 --- a/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v2beta1/dns-gen.go @@ -528,6 +528,9 @@ type ManagedZone struct { // string "dns#managedZone". 
Kind string `json:"kind,omitempty"` + // Labels: User labels. + Labels map[string]string `json:"labels,omitempty"` + // Name: User assigned name for this resource. Must be unique within the // project. The name must be 1-63 characters long, must begin with a // letter, end with a letter or digit, and only contain lowercase @@ -669,36 +672,6 @@ func (s *ManagedZoneOperationsListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type ManagedZonesDeleteResponse struct { - Header *ResponseHeader `json:"header,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Header") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Header") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. 
- NullFields []string `json:"-"` -} - -func (s *ManagedZonesDeleteResponse) MarshalJSON() ([]byte, error) { - type NoMethod ManagedZonesDeleteResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - type ManagedZonesListResponse struct { Header *ResponseHeader `json:"header,omitempty"` @@ -2698,42 +2671,17 @@ func (c *ManagedZonesDeleteCall) doRequest(alt string) (*http.Response, error) { } // Do executes the "dns.managedZones.delete" call. -// Exactly one of *ManagedZonesDeleteResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *ManagedZonesDeleteResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) (*ManagedZonesDeleteResponse, error) { +func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - } - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, err + return err } - ret := &ManagedZonesDeleteResponse{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil + return nil // { // "description": "Delete a previously created ManagedZone.", // "httpMethod": "DELETE", @@ -2762,9 +2710,6 @@ func (c *ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) (*ManagedZones // } // }, // "path": "{project}/managedZones/{managedZone}", - // "response": { - // "$ref": "ManagedZonesDeleteResponse" - // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/ndev.clouddns.readwrite" diff --git a/vendor/google.golang.org/api/firestore/v1beta1/firestore-api.json b/vendor/google.golang.org/api/firestore/v1beta1/firestore-api.json index 125e11e3b..287ac2256 100644 --- a/vendor/google.golang.org/api/firestore/v1beta1/firestore-api.json +++ b/vendor/google.golang.org/api/firestore/v1beta1/firestore-api.json @@ -741,7 +741,7 @@ } } }, - "revision": "20180124", + "revision": "20180321", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "ArrayValue": { @@ -956,7 +956,7 @@ "type": "string" }, "updateTime": { - "description": "Output only. The time at which the document was last changed.\n\nThis value is initally set to the `create_time` then increases\nmonotonically with each change to the document. It can also be\ncompared to values from other documents and the `read_time` of a query.", + "description": "Output only. 
The time at which the document was last changed.\n\nThis value is initially set to the `create_time` then increases\nmonotonically with each change to the document. It can also be\ncompared to values from other documents and the `read_time` of a query.", "format": "google-datetime", "type": "string" } diff --git a/vendor/google.golang.org/api/firestore/v1beta1/firestore-gen.go b/vendor/google.golang.org/api/firestore/v1beta1/firestore-gen.go index 6fcd9e5e8..096501f17 100644 --- a/vendor/google.golang.org/api/firestore/v1beta1/firestore-gen.go +++ b/vendor/google.golang.org/api/firestore/v1beta1/firestore-gen.go @@ -569,7 +569,7 @@ type Document struct { // UpdateTime: Output only. The time at which the document was last // changed. // - // This value is initally set to the `create_time` then + // This value is initially set to the `create_time` then // increases // monotonically with each change to the document. It can also // be diff --git a/vendor/google.golang.org/api/gmail/v1/gmail-api.json b/vendor/google.golang.org/api/gmail/v1/gmail-api.json index 83e716fe1..607af9b3f 100644 --- a/vendor/google.golang.org/api/gmail/v1/gmail-api.json +++ b/vendor/google.golang.org/api/gmail/v1/gmail-api.json @@ -41,7 +41,7 @@ "description": "Access Gmail mailboxes including sending user email.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/gmail/api/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/vprXt015GXFAleV2tmPH4juqBRw\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/HU5u3llBS2pcCtNMtKD_RzuFbzU\"", "icons": { "x16": "https://www.google.com/images/icons/product/googlemail-16.png", "x32": "https://www.google.com/images/icons/product/googlemail-32.png" @@ -2382,7 +2382,7 @@ } } }, - "revision": "20180131", + "revision": "20180319", "rootUrl": "https://www.googleapis.com/", "schemas": { "AutoForwarding": { @@ -3246,7 +3246,7 @@ "id": "SendAs", "properties": { "displayName": { - "description": "A name that appears in the \"From:\" header for mail 
sent using this alias. For custom \"from\" addresses, when this is empty, Gmail will populate the \"From:\" header with the name that is used for the primary address associated with the account.", + "description": "A name that appears in the \"From:\" header for mail sent using this alias. For custom \"from\" addresses, when this is empty, Gmail will populate the \"From:\" header with the name that is used for the primary address associated with the account. If the admin has disabled the ability for users to update their name format, requests to update this field for the primary login will silently fail.", "type": "string" }, "isDefault": { diff --git a/vendor/google.golang.org/api/gmail/v1/gmail-gen.go b/vendor/google.golang.org/api/gmail/v1/gmail-gen.go index 9c35b7e82..e1da72e82 100644 --- a/vendor/google.golang.org/api/gmail/v1/gmail-gen.go +++ b/vendor/google.golang.org/api/gmail/v1/gmail-gen.go @@ -1596,7 +1596,9 @@ type SendAs struct { // DisplayName: A name that appears in the "From:" header for mail sent // using this alias. For custom "from" addresses, when this is empty, // Gmail will populate the "From:" header with the name that is used for - // the primary address associated with the account. + // the primary address associated with the account. If the admin has + // disabled the ability for users to update their name format, requests + // to update this field for the primary login will silently fail. 
DisplayName string `json:"displayName,omitempty"` // IsDefault: Whether this address is selected as the default "From:" diff --git a/vendor/google.golang.org/api/manufacturers/v1/manufacturers-api.json b/vendor/google.golang.org/api/manufacturers/v1/manufacturers-api.json index a56b7ff33..661f7263f 100644 --- a/vendor/google.golang.org/api/manufacturers/v1/manufacturers-api.json +++ b/vendor/google.golang.org/api/manufacturers/v1/manufacturers-api.json @@ -9,7 +9,7 @@ } }, "basePath": "", - "baseUrl": "https://manufacturers.googleapis.com/", + "baseUrl": "https://content-manufacturers.googleapis.com/", "batchPath": "batch", "canonicalName": "Manufacturer Center", "description": "Public API for managing Manufacturer Center related data.", @@ -261,8 +261,8 @@ } } }, - "revision": "20180219", - "rootUrl": "https://manufacturers.googleapis.com/", + "revision": "20180316", + "rootUrl": "https://content-manufacturers.googleapis.com/", "schemas": { "Attributes": { "description": "Attributes of the product. For more information, see\nhttps://support.google.com/manufacturers/answer/6124116.", diff --git a/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go b/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go index 8e1797f14..c58360820 100644 --- a/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go +++ b/vendor/google.golang.org/api/manufacturers/v1/manufacturers-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "manufacturers:v1" const apiName = "manufacturers" const apiVersion = "v1" -const basePath = "https://manufacturers.googleapis.com/" +const basePath = "https://content-manufacturers.googleapis.com/" // OAuth2 scopes used by this API. 
const ( diff --git a/vendor/google.golang.org/api/ml/v1/ml-api.json b/vendor/google.golang.org/api/ml/v1/ml-api.json index 2512abfdd..0a5bf81b3 100644 --- a/vendor/google.golang.org/api/ml/v1/ml-api.json +++ b/vendor/google.golang.org/api/ml/v1/ml-api.json @@ -992,7 +992,7 @@ } } }, - "revision": "20180306", + "revision": "20180320", "rootUrl": "https://ml.googleapis.com/", "schemas": { "GoogleApi__HttpBody": { @@ -1487,7 +1487,7 @@ "type": "array" }, "maxValue": { - "description": "Required if typeis `DOUBLE` or `INTEGER`. This field\nshould be unset if type is `CATEGORICAL`. This value should be integers if\ntype is `INTEGER`.", + "description": "Required if type is `DOUBLE` or `INTEGER`. This field\nshould be unset if type is `CATEGORICAL`. This value should be integers if\ntype is `INTEGER`.", "format": "double", "type": "number" }, @@ -1651,7 +1651,7 @@ "type": "object" }, "GoogleCloudMlV1__TrainingInput": { - "description": "Represents input parameters for a training job. When using the\ngcloud command to submit your training job, you can specify\nthe input parameters as command-line arguments and/or in a YAML configuration\nfile referenced from the --config command-line argument. For\ndetails, see the guide to\n\u003ca href=\"/ml-engine/docs/training-jobs\"\u003esubmitting a training job\u003c/a\u003e.", + "description": "Represents input parameters for a training job. When using the\ngcloud command to submit your training job, you can specify\nthe input parameters as command-line arguments and/or in a YAML configuration\nfile referenced from the --config command-line argument. 
For\ndetails, see the guide to\n\u003ca href=\"/ml-engine/docs/training-jobs\"\u003esubmitting a training job\u003c/a\u003e.\nNext ID: 22", "id": "GoogleCloudMlV1__TrainingInput", "properties": { "args": { @@ -1823,6 +1823,10 @@ "description": "Required.The name specified for the version when it was created.\n\nThe version name must be unique within the model it is created in.", "type": "string" }, + "pythonVersion": { + "description": "Optional. The version of Python used in prediction. If not set, the default\nversion is '2.7'. Python '3.5' is available when `runtime_version` is set\nto '1.4' and above. Python '2.7' works with all supported runtime versions.", + "type": "string" + }, "runtimeVersion": { "description": "Optional. The Google Cloud ML runtime version to use for this deployment.\nIf not set, Google Cloud ML will choose a version.", "type": "string" diff --git a/vendor/google.golang.org/api/ml/v1/ml-gen.go b/vendor/google.golang.org/api/ml/v1/ml-gen.go index f02f42388..21b0a899d 100644 --- a/vendor/google.golang.org/api/ml/v1/ml-gen.go +++ b/vendor/google.golang.org/api/ml/v1/ml-gen.go @@ -1035,7 +1035,8 @@ type GoogleCloudMlV1__ParameterSpec struct { // should not contain more than 1,000 values. DiscreteValues []float64 `json:"discreteValues,omitempty"` - // MaxValue: Required if typeis `DOUBLE` or `INTEGER`. This field + // MaxValue: Required if type is `DOUBLE` or `INTEGER`. This + // field // should be unset if type is `CATEGORICAL`. This value should be // integers if // type is `INTEGER`. @@ -1347,6 +1348,7 @@ type GoogleCloudMlV1__SetDefaultVersionRequest struct { // details, see the guide to // submitting a training // job. +// Next ID: 22 type GoogleCloudMlV1__TrainingInput struct { // Args: Optional. Command line arguments to pass to the program. Args []string `json:"args,omitempty"` @@ -1749,6 +1751,14 @@ type GoogleCloudMlV1__Version struct { // The version name must be unique within the model it is created in. 
Name string `json:"name,omitempty"` + // PythonVersion: Optional. The version of Python used in prediction. If + // not set, the default + // version is '2.7'. Python '3.5' is available when `runtime_version` is + // set + // to '1.4' and above. Python '2.7' works with all supported runtime + // versions. + PythonVersion string `json:"pythonVersion,omitempty"` + // RuntimeVersion: Optional. The Google Cloud ML runtime version to use // for this deployment. // If not set, Google Cloud ML will choose a version. diff --git a/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-api.json b/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-api.json index cd0ba1667..5d2eddea0 100644 --- a/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-api.json +++ b/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-api.json @@ -314,7 +314,7 @@ } } }, - "revision": "20180212", + "revision": "20180306", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { @@ -426,7 +426,7 @@ } }, "servicePath": "", - "title": "Google Cloud OS Login API", + "title": "Cloud OS Login API", "version": "v1alpha", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-gen.go b/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-gen.go index 8230e325e..ceac68348 100644 --- a/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-gen.go +++ b/vendor/google.golang.org/api/oslogin/v1alpha/oslogin-gen.go @@ -1,4 +1,4 @@ -// Package oslogin provides access to the Google Cloud OS Login API. +// Package oslogin provides access to the Cloud OS Login API. 
// // See https://cloud.google.com/compute/docs/oslogin/rest/ // diff --git a/vendor/google.golang.org/api/oslogin/v1beta/oslogin-api.json b/vendor/google.golang.org/api/oslogin/v1beta/oslogin-api.json index fae950df3..513a04712 100644 --- a/vendor/google.golang.org/api/oslogin/v1beta/oslogin-api.json +++ b/vendor/google.golang.org/api/oslogin/v1beta/oslogin-api.json @@ -314,7 +314,7 @@ } } }, - "revision": "20180212", + "revision": "20180306", "rootUrl": "https://oslogin.googleapis.com/", "schemas": { "Empty": { @@ -426,7 +426,7 @@ } }, "servicePath": "", - "title": "Google Cloud OS Login API", + "title": "Cloud OS Login API", "version": "v1beta", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/oslogin/v1beta/oslogin-gen.go b/vendor/google.golang.org/api/oslogin/v1beta/oslogin-gen.go index c501e1a6b..186922ca1 100644 --- a/vendor/google.golang.org/api/oslogin/v1beta/oslogin-gen.go +++ b/vendor/google.golang.org/api/oslogin/v1beta/oslogin-gen.go @@ -1,4 +1,4 @@ -// Package oslogin provides access to the Google Cloud OS Login API. +// Package oslogin provides access to the Cloud OS Login API. 
// // See https://cloud.google.com/compute/docs/oslogin/rest/ // diff --git a/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-api.json b/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-api.json index a8d652dbc..b2221ea2b 100644 --- a/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-api.json +++ b/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-api.json @@ -4,7 +4,8 @@ "batchPath": "batch/pagespeedonline/v4", "description": "Analyzes the performance of a web page and provides tailored suggestions to make that page faster.", "discoveryVersion": "v1", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/pP7h1URmYSyYoOU6ofcFP2Efxhk\"", + "documentationLink": "https://developers.google.com/speed/docs/insights/v4/getting-started", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/XCul5cFRh1Et1JIhvRP7o7R9irw\"", "icons": { "x16": "https://www.google.com/images/icons/product/pagespeed-16.png", "x32": "https://www.google.com/images/icons/product/pagespeed-32.png" @@ -141,7 +142,7 @@ } } }, - "revision": "20180308", + "revision": "20180315", "rootUrl": "https://www.googleapis.com/", "schemas": { "PagespeedApiFormatStringV4": { diff --git a/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-gen.go b/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-gen.go index 2ad3ea627..c1e5cdcd8 100644 --- a/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-gen.go +++ b/vendor/google.golang.org/api/pagespeedonline/v4/pagespeedonline-gen.go @@ -1,5 +1,7 @@ // Package pagespeedonline provides access to the PageSpeed Insights API. 
// +// See https://developers.google.com/speed/docs/insights/v4/getting-started +// // Usage example: // // import "google.golang.org/api/pagespeedonline/v4" diff --git a/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-api.json b/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-api.json index 68a07b975..4f805136b 100644 --- a/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-api.json +++ b/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-api.json @@ -257,7 +257,7 @@ ], "parameters": { "pageSize": { - "description": "The maximum number of results returned by this request. Currently, the\ndefault maximum is set to 1000. If page_size is not provided or provided a\nnumber larger than 1000, it will be automatically set to 1000.\n\nOptional.", + "description": "The maximum number of results returned by this request. Currently, the\ndefault maximum is set to 1000. If page_size is not provided or the size\nprovided is a number larger than 1000, it will be automatically set to\n1000.\n\nOptional.", "format": "int32", "location": "query", "type": "integer" @@ -275,7 +275,7 @@ "type": "string" }, "query": { - "description": "Set a query `{expression}` for querying tenancy units. Your `{expression}`\nmust be in the format: `field_name=literal_string`. The `field_name` is the\nname of the field you want to compare. Supported fields are\n`tenant_resources.tag` and`tenant_resources.resource`.\n\nFor example, to search tenancy units that contain at least one tenant\nresource with given tag 'xyz', use query `tenant_resources.tag=xyz`.\nTo search tenancy units that contain at least one tenant resource with\ngiven resource name 'projects/123456', use query\n`tenant_resources.resource=projects/123456`.\n\nMultiple expressions can be joined with `AND`s. Tenancy units must match\nall expressions to be included in the result set. 
For example,\n`tenant_resources.tag=xyz AND tenant_resources.resource=projects/123456`\n\nOptional.", + "description": "Set a query `{expression}` for querying tenancy units. Your `{expression}`\nmust be in the format: `field_name=literal_string`. The `field_name` is the\nname of the field you want to compare. Supported fields are\n`tenant_resources.tag` and `tenant_resources.resource`.\n\nFor example, to search tenancy units that contain at least one tenant\nresource with given tag 'xyz', use query `tenant_resources.tag=xyz`.\nTo search tenancy units that contain at least one tenant resource with\ngiven resource name 'projects/123456', use query\n`tenant_resources.resource=projects/123456`.\n\nMultiple expressions can be joined with `AND`s. Tenancy units must match\nall expressions to be included in the result set. For example,\n`tenant_resources.tag=xyz AND tenant_resources.resource=projects/123456`\n\nOptional.", "location": "query", "type": "string" } @@ -293,7 +293,7 @@ "tenancyUnits": { "methods": { "addProject": { - "description": "Add a new tenant project to the tenancy unit.\nThere can be at most 512 tenant projects in a tenancy units.\nIf there are previously failed AddTenantProject calls, you might need to\ncall RemoveTenantProject first to clean them before you can make another\nAddTenantProject with the same tag.\nOperation\u003cresponse: Empty\u003e.", + "description": "Add a new tenant project to the tenancy unit.\nThere can be at most 512 tenant projects in a tenancy unit.\nIf there are previously failed `AddTenantProject` calls, you might need to\ncall `RemoveTenantProject` first to clean them before you can make another\n`AddTenantProject` with the same tag.\nOperation\u003cresponse: Empty\u003e.", "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}/tenancyUnits/{tenancyUnitsId}:addProject", "httpMethod": "POST", "id": "serviceconsumermanagement.services.tenancyUnits.addProject", @@ -349,7 +349,7 @@ ] }, "delete": { - "description": 
"Delete tenancy unit. Before the tenancy unit is deleted, there should be\nno tenant resource in it.\nOperation\u003cresponse: Empty\u003e.", + "description": "Delete a tenancy unit. Before the tenancy unit is deleted, there should be\nno tenant resources in it.\nOperation\u003cresponse: Empty\u003e.", "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}/tenancyUnits/{tenancyUnitsId}", "httpMethod": "DELETE", "id": "serviceconsumermanagement.services.tenancyUnits.delete", @@ -374,7 +374,7 @@ ] }, "list": { - "description": "Find tenancy unit for a service and consumer.\nThis method should not be used in producers' runtime path, e.g. finding\nthe tenant project number when creating VMs. Producers should persist\nthe tenant project information after the project is created.", + "description": "Find the tenancy unit for a service and consumer.\nThis method should not be used in producers' runtime path, for example\nfinding the tenant project number when creating VMs. Producers should\npersist the tenant project information after the project is created.", "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}/tenancyUnits", "httpMethod": "GET", "id": "serviceconsumermanagement.services.tenancyUnits.list", @@ -447,11 +447,11 @@ } } }, - "revision": "20180308", + "revision": "20180322", "rootUrl": "https://serviceconsumermanagement.googleapis.com/", "schemas": { "AddTenantProjectRequest": { - "description": "Request to add a newly created and configured tenant project to tenancy\nunit.", + "description": "Request to add a newly created and configured tenant project to a tenancy\nunit.", "id": "AddTenantProjectRequest", "properties": { "projectConfig": { @@ -690,7 +690,7 @@ "type": "object" }, "BillingConfig": { - "description": "Describes billing configuration for a new Tenant Project", + "description": "Describes billing configuration for a new tenant project.", "id": "BillingConfig", "properties": { "billingAccount": { @@ -793,7 +793,7 @@ "id": 
"CreateTenancyUnitRequest", "properties": { "tenancyUnitId": { - "description": "Optional producer provided identifier of the tenancy unit\nMust be no longer than 40 characters and preferably URI friendly\nIf it is not provided, UID for the tenancy unit will be auto generated\nIt must be unique across a service.\nIf the tenancy unit already exists for the service and consumer pair,\nCreateTenancyUnit will return existing tenancy unit if provided identifier\nis identical or empty, otherwise the call will fail.", + "description": "Optional producer provided identifier of the tenancy unit.\nMust be no longer than 40 characters and preferably URI friendly.\nIf it is not provided, a UID for the tenancy unit will be auto generated.\nIt must be unique across a service.\nIf the tenancy unit already exists for the service and consumer pair,\n`CreateTenancyUnit` will return the existing tenancy unit if the provided\nidentifier is identical or empty, otherwise the call will fail.", "type": "string" } }, @@ -862,7 +862,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;--BETA: comment for BETA users --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1276,7 +1276,7 @@ "type": "string" }, "tenancyUnits": { - "description": "Tenancy Units matching the request.", + "description": "Tenancy units matching the request.", "items": { "$ref": "TenancyUnit" }, @@ -1738,7 +1738,7 @@ "id": "PolicyBinding", "properties": { "members": { - "description": "Uses the same format as in IAM policy.\n`member` must include both prefix and id. E.g., `user:{emailId}`,\n`serviceAccount:{emailId}`, `group:{emailId}`.", + "description": "Uses the same format as in IAM policy.\n`member` must include both prefix and ID. For example, `user:{emailId}`,\n`serviceAccount:{emailId}`, `group:{emailId}`.", "items": { "type": "string" }, @@ -1995,10 +1995,6 @@ "usage": { "$ref": "Usage", "description": "Configuration controlling usage of this service." - }, - "visibility": { - "$ref": "Visibility", - "description": "API visibility configuration." } }, "type": "object" @@ -2012,7 +2008,7 @@ "type": "string" }, "tenantProjectRoles": { - "description": "Roles for the service account above on tenant project.", + "description": "Roles for the associated service account for the tenant project.", "items": { "type": "string" }, @@ -2133,7 +2129,7 @@ "id": "TenancyUnit", "properties": { "consumer": { - "description": "@OutputOnly Cloud resource One Platform Name of the consumer of this\nservice. 
For example 'projects/123456'.", + "description": "@OutputOnly Cloud resource name of the consumer of this service.\nFor example 'projects/123456'.", "type": "string" }, "createTime": { @@ -2165,7 +2161,7 @@ "properties": { "billingConfig": { "$ref": "BillingConfig", - "description": "Billing account properties.\nIt may be specified explicitly, or created from the specified group\nduring provisioning" + "description": "Billing account properties.\nIt might be specified explicitly, or created from the specified group\nduring provisioning" }, "folder": { "description": "Folder where project in this tenancy unit must be located\nThis folder must have been previously created with proper\npermissions for the caller to create and configure a project in it.\nValid folder resource names have the format `folders/{folder_number}`\n(for example, `folders/123456`).", @@ -2183,7 +2179,7 @@ "description": "Configuration for IAM service account on tenant project." }, "services": { - "description": "Google Cloud API names of services that will be activated on this project\nduring provisioning. If any of these services can not be activated,\naddTenantProject method will fail.\nFor example: 'compute.googleapis.com','cloudfunctions.googleapis.com'", + "description": "Google Cloud API names of services that will be activated on this project\nduring provisioning. 
If any of these services can not be activated,\nrequest will fail.\nFor example: 'compute.googleapis.com','cloudfunctions.googleapis.com'", "items": { "type": "string" }, @@ -2197,7 +2193,7 @@ "type": "object" }, "TenantProjectPolicy": { - "description": "Describes policy settings that need to be applied to a newly\ncreated Tenant Project.", + "description": "Describes policy settings that need to be applied to a newly\ncreated tenant project.", "id": "TenantProjectPolicy", "properties": { "policyBindings": { @@ -2215,7 +2211,7 @@ "id": "TenantResource", "properties": { "resource": { - "description": "@OutputOnly Identifier of the tenant resource.\nFor cloud projects it is in the form 'projects/{number}'.\nFor example 'projects/123456'.", + "description": "@OutputOnly Identifier of the tenant resource.\nFor cloud projects, it is in the form 'projects/{number}'.\nFor example 'projects/123456'.", "type": "string" }, "status": { @@ -2334,35 +2330,6 @@ } }, "type": "object" - }, - "Visibility": { - "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. 
The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", - "id": "Visibility", - "properties": { - "rules": { - "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "items": { - "$ref": "VisibilityRule" - }, - "type": "array" - } - }, - "type": "object" - }, - "VisibilityRule": { - "description": "A visibility rule provides visibility configuration for an individual API\nelement.", - "id": "VisibilityRule", - "properties": { - "restriction": { - "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had access to it through GOOGLE_INTERNAL.", - "type": "string" - }, - "selector": { - "description": "Selects methods, messages, fields, enums, etc. 
to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" - } - }, - "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go b/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go index 31462b403..1b9c44638 100644 --- a/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go +++ b/vendor/google.golang.org/api/serviceconsumermanagement/v1/serviceconsumermanagement-gen.go @@ -112,7 +112,7 @@ type ServicesTenancyUnitsService struct { } // AddTenantProjectRequest: Request to add a newly created and -// configured tenant project to tenancy +// configured tenant project to a tenancy // unit. type AddTenantProjectRequest struct { // ProjectConfig: Configuration of the new tenant project that will be @@ -767,8 +767,8 @@ func (s *Billing) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BillingConfig: Describes billing configuration for a new Tenant -// Project +// BillingConfig: Describes billing configuration for a new tenant +// project. type BillingConfig struct { // BillingAccount: Name of the billing account. // For example `billingAccounts/012345-567890-ABCDEF`. @@ -1006,16 +1006,16 @@ func (s *Control) MarshalJSON() ([]byte, error) { // consumer of a service. type CreateTenancyUnitRequest struct { // TenancyUnitId: Optional producer provided identifier of the tenancy - // unit - // Must be no longer than 40 characters and preferably URI friendly - // If it is not provided, UID for the tenancy unit will be auto - // generated + // unit. + // Must be no longer than 40 characters and preferably URI friendly. + // If it is not provided, a UID for the tenancy unit will be auto + // generated. // It must be unique across a service. 
// If the tenancy unit already exists for the service and consumer // pair, - // CreateTenancyUnit will return existing tenancy unit if provided - // identifier - // is identical or empty, otherwise the call will fail. + // `CreateTenancyUnit` will return the existing tenancy unit if the + // provided + // identifier is identical or empty, otherwise the call will fail. TenancyUnitId string `json:"tenancyUnitId,omitempty"` // ForceSendFields is a list of field names (e.g. "TenancyUnitId") to @@ -1239,11 +1239,7 @@ func (s *CustomHttpPattern) MarshalJSON() ([]byte, error) { // Text can be excluded from doc using the following // notation: //
(-- internal comment --)
-// Comments can be made conditional using a visibility label. The -// below -// text will be only rendered if the `BETA` label is -// available: -//
(--BETA: comment for BETA users --)
+// // A few directives are available in documentation. Note that // directives must appear on a single line to be properly // identified. The `include` directive includes a markdown file from @@ -2203,7 +2199,7 @@ type ListTenancyUnitsResponse struct { // NextPageToken: Pagination token for large results. NextPageToken string `json:"nextPageToken,omitempty"` - // TenancyUnits: Tenancy Units matching the request. + // TenancyUnits: Tenancy units matching the request. TenancyUnits []*TenancyUnit `json:"tenancyUnits,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3365,7 +3361,7 @@ func (s *Page) MarshalJSON() ([]byte, error) { // this level) type PolicyBinding struct { // Members: Uses the same format as in IAM policy. - // `member` must include both prefix and id. E.g., + // `member` must include both prefix and ID. For example, // `user:{emailId}`, // `serviceAccount:{emailId}`, `group:{emailId}`. Members []string `json:"members,omitempty"` @@ -3853,9 +3849,6 @@ type Service struct { // Usage: Configuration controlling usage of this service. Usage *Usage `json:"usage,omitempty"` - // Visibility: API visibility configuration. - Visibility *Visibility `json:"visibility,omitempty"` - // ForceSendFields is a list of field names (e.g. "Apis") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3892,8 +3885,8 @@ type ServiceAccountConfig struct { // have to guarantee it. AccountId string `json:"accountId,omitempty"` - // TenantProjectRoles: Roles for the service account above on tenant - // project. + // TenantProjectRoles: Roles for the associated service account for the + // tenant project. TenantProjectRoles []string `json:"tenantProjectRoles,omitempty"` // ForceSendFields is a list of field names (e.g. 
"AccountId") to @@ -4256,9 +4249,9 @@ func (s *SystemParameters) MarshalJSON() ([]byte, error) { // TenancyUnit: Representation of a tenancy unit. type TenancyUnit struct { - // Consumer: @OutputOnly Cloud resource One Platform Name of the - // consumer of this - // service. For example 'projects/123456'. + // Consumer: @OutputOnly Cloud resource name of the consumer of this + // service. + // For example 'projects/123456'. Consumer string `json:"consumer,omitempty"` // CreateTime: @OutputOnly The time this tenancy unit was created. @@ -4317,7 +4310,7 @@ func (s *TenancyUnit) MarshalJSON() ([]byte, error) { // removal. type TenantProjectConfig struct { // BillingConfig: Billing account properties. - // It may be specified explicitly, or created from the specified + // It might be specified explicitly, or created from the specified // group // during provisioning BillingConfig *BillingConfig `json:"billingConfig,omitempty"` @@ -4343,7 +4336,7 @@ type TenantProjectConfig struct { // on this project // during provisioning. If any of these services can not be // activated, - // addTenantProject method will fail. + // request will fail. // For example: 'compute.googleapis.com','cloudfunctions.googleapis.com' Services []string `json:"services,omitempty"` @@ -4376,7 +4369,7 @@ func (s *TenantProjectConfig) MarshalJSON() ([]byte, error) { // TenantProjectPolicy: Describes policy settings that need to be // applied to a newly -// created Tenant Project. +// created tenant project. type TenantProjectPolicy struct { // PolicyBindings: Policy bindings to be applied to the tenant project, // in addition to the @@ -4417,7 +4410,7 @@ func (s *TenantProjectPolicy) MarshalJSON() ([]byte, error) { // TenantResource: Resource constituting the TenancyUnit. type TenantResource struct { // Resource: @OutputOnly Identifier of the tenant resource. - // For cloud projects it is in the form 'projects/{number}'. + // For cloud projects, it is in the form 'projects/{number}'. 
// For example 'projects/123456'. Resource string `json:"resource,omitempty"` @@ -4639,119 +4632,6 @@ func (s *UsageRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Visibility: `Visibility` defines restrictions for the visibility of -// service -// elements. Restrictions are specified using visibility labels -// (e.g., TRUSTED_TESTER) that are elsewhere linked to users and -// projects. -// -// Users and projects can have access to more than one visibility label. -// The -// effective visibility for multiple labels is the union of each -// label's -// elements, plus any unrestricted elements. -// -// If an element and its parents have no restrictions, visibility -// is -// unconditionally granted. -// -// Example: -// -// visibility: -// rules: -// - selector: google.calendar.Calendar.EnhancedSearch -// restriction: TRUSTED_TESTER -// - selector: google.calendar.Calendar.Delegate -// restriction: GOOGLE_INTERNAL -// -// Here, all methods are publicly visible except for the restricted -// methods -// EnhancedSearch and Delegate. -type Visibility struct { - // Rules: A list of visibility rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. - Rules []*VisibilityRule `json:"rules,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Rules") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rules") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. 
However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Visibility) MarshalJSON() ([]byte, error) { - type NoMethod Visibility - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VisibilityRule: A visibility rule provides visibility configuration -// for an individual API -// element. -type VisibilityRule struct { - // Restriction: A comma-separated list of visibility labels that apply - // to the `selector`. - // Any of the listed labels can be used to grant the visibility. - // - // If a rule has multiple labels, removing one of the labels but not all - // of - // them can break clients. - // - // Example: - // - // visibility: - // rules: - // - selector: google.calendar.Calendar.EnhancedSearch - // restriction: GOOGLE_INTERNAL, TRUSTED_TESTER - // - // Removing GOOGLE_INTERNAL from this restriction will break clients - // that - // rely on this method and only had access to it through - // GOOGLE_INTERNAL. - Restriction string `json:"restriction,omitempty"` - - // Selector: Selects methods, messages, fields, enums, etc. to which - // this rule applies. - // - // Refer to selector for syntax details. - Selector string `json:"selector,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Restriction") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. 
"Restriction") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VisibilityRule) MarshalJSON() ([]byte, error) { - type NoMethod VisibilityRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // method id "serviceconsumermanagement.operations.cancel": type OperationsCancelCall struct { @@ -5415,9 +5295,11 @@ func (r *ServicesService) Search(parent string) *ServicesSearchCall { // PageSize sets the optional parameter "pageSize": The maximum number // of results returned by this request. Currently, the -// default maximum is set to 1000. If page_size is not provided or -// provided a -// number larger than 1000, it will be automatically set to 1000. +// default maximum is set to 1000. If page_size is not provided or the +// size +// provided is a number larger than 1000, it will be automatically set +// to +// 1000. func (c *ServicesSearchCall) PageSize(pageSize int64) *ServicesSearchCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c @@ -5439,7 +5321,7 @@ func (c *ServicesSearchCall) PageToken(pageToken string) *ServicesSearchCall { // is the // name of the field you want to compare. Supported fields // are -// `tenant_resources.tag` and`tenant_resources.resource`. +// `tenant_resources.tag` and `tenant_resources.resource`. // // For example, to search tenancy units that contain at least one // tenant @@ -5565,7 +5447,7 @@ func (c *ServicesSearchCall) Do(opts ...googleapi.CallOption) (*SearchTenancyUni // ], // "parameters": { // "pageSize": { - // "description": "The maximum number of results returned by this request. 
Currently, the\ndefault maximum is set to 1000. If page_size is not provided or provided a\nnumber larger than 1000, it will be automatically set to 1000.\n\nOptional.", + // "description": "The maximum number of results returned by this request. Currently, the\ndefault maximum is set to 1000. If page_size is not provided or the size\nprovided is a number larger than 1000, it will be automatically set to\n1000.\n\nOptional.", // "format": "int32", // "location": "query", // "type": "integer" @@ -5583,7 +5465,7 @@ func (c *ServicesSearchCall) Do(opts ...googleapi.CallOption) (*SearchTenancyUni // "type": "string" // }, // "query": { - // "description": "Set a query `{expression}` for querying tenancy units. Your `{expression}`\nmust be in the format: `field_name=literal_string`. The `field_name` is the\nname of the field you want to compare. Supported fields are\n`tenant_resources.tag` and`tenant_resources.resource`.\n\nFor example, to search tenancy units that contain at least one tenant\nresource with given tag 'xyz', use query `tenant_resources.tag=xyz`.\nTo search tenancy units that contain at least one tenant resource with\ngiven resource name 'projects/123456', use query\n`tenant_resources.resource=projects/123456`.\n\nMultiple expressions can be joined with `AND`s. Tenancy units must match\nall expressions to be included in the result set. For example,\n`tenant_resources.tag=xyz AND tenant_resources.resource=projects/123456`\n\nOptional.", + // "description": "Set a query `{expression}` for querying tenancy units. Your `{expression}`\nmust be in the format: `field_name=literal_string`. The `field_name` is the\nname of the field you want to compare. 
Supported fields are\n`tenant_resources.tag` and `tenant_resources.resource`.\n\nFor example, to search tenancy units that contain at least one tenant\nresource with given tag 'xyz', use query `tenant_resources.tag=xyz`.\nTo search tenancy units that contain at least one tenant resource with\ngiven resource name 'projects/123456', use query\n`tenant_resources.resource=projects/123456`.\n\nMultiple expressions can be joined with `AND`s. Tenancy units must match\nall expressions to be included in the result set. For example,\n`tenant_resources.tag=xyz AND tenant_resources.resource=projects/123456`\n\nOptional.", // "location": "query", // "type": "string" // } @@ -5632,12 +5514,12 @@ type ServicesTenancyUnitsAddProjectCall struct { } // AddProject: Add a new tenant project to the tenancy unit. -// There can be at most 512 tenant projects in a tenancy units. -// If there are previously failed AddTenantProject calls, you might need -// to -// call RemoveTenantProject first to clean them before you can make +// There can be at most 512 tenant projects in a tenancy unit. +// If there are previously failed `AddTenantProject` calls, you might +// need to +// call `RemoveTenantProject` first to clean them before you can make // another -// AddTenantProject with the same tag. +// `AddTenantProject` with the same tag. // Operation. 
func (r *ServicesTenancyUnitsService) AddProject(parent string, addtenantprojectrequest *AddTenantProjectRequest) *ServicesTenancyUnitsAddProjectCall { c := &ServicesTenancyUnitsAddProjectCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5732,7 +5614,7 @@ func (c *ServicesTenancyUnitsAddProjectCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Add a new tenant project to the tenancy unit.\nThere can be at most 512 tenant projects in a tenancy units.\nIf there are previously failed AddTenantProject calls, you might need to\ncall RemoveTenantProject first to clean them before you can make another\nAddTenantProject with the same tag.\nOperation\u003cresponse: Empty\u003e.", + // "description": "Add a new tenant project to the tenancy unit.\nThere can be at most 512 tenant projects in a tenancy unit.\nIf there are previously failed `AddTenantProject` calls, you might need to\ncall `RemoveTenantProject` first to clean them before you can make another\n`AddTenantProject` with the same tag.\nOperation\u003cresponse: Empty\u003e.", // "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}/tenancyUnits/{tenancyUnitsId}:addProject", // "httpMethod": "POST", // "id": "serviceconsumermanagement.services.tenancyUnits.addProject", @@ -5907,9 +5789,9 @@ type ServicesTenancyUnitsDeleteCall struct { header_ http.Header } -// Delete: Delete tenancy unit. Before the tenancy unit is deleted, +// Delete: Delete a tenancy unit. Before the tenancy unit is deleted, // there should be -// no tenant resource in it. +// no tenant resources in it. // Operation. func (r *ServicesTenancyUnitsService) Delete(name string) *ServicesTenancyUnitsDeleteCall { c := &ServicesTenancyUnitsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -5998,7 +5880,7 @@ func (c *ServicesTenancyUnitsDeleteCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Delete tenancy unit. 
Before the tenancy unit is deleted, there should be\nno tenant resource in it.\nOperation\u003cresponse: Empty\u003e.", + // "description": "Delete a tenancy unit. Before the tenancy unit is deleted, there should be\nno tenant resources in it.\nOperation\u003cresponse: Empty\u003e.", // "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}/tenancyUnits/{tenancyUnitsId}", // "httpMethod": "DELETE", // "id": "serviceconsumermanagement.services.tenancyUnits.delete", @@ -6036,12 +5918,12 @@ type ServicesTenancyUnitsListCall struct { header_ http.Header } -// List: Find tenancy unit for a service and consumer. -// This method should not be used in producers' runtime path, e.g. -// finding -// the tenant project number when creating VMs. Producers should -// persist -// the tenant project information after the project is created. +// List: Find the tenancy unit for a service and consumer. +// This method should not be used in producers' runtime path, for +// example +// finding the tenant project number when creating VMs. Producers +// should +// persist the tenant project information after the project is created. func (r *ServicesTenancyUnitsService) List(parent string) *ServicesTenancyUnitsListCall { c := &ServicesTenancyUnitsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.parent = parent @@ -6166,7 +6048,7 @@ func (c *ServicesTenancyUnitsListCall) Do(opts ...googleapi.CallOption) (*ListTe } return ret, nil // { - // "description": "Find tenancy unit for a service and consumer.\nThis method should not be used in producers' runtime path, e.g. finding\nthe tenant project number when creating VMs. Producers should persist\nthe tenant project information after the project is created.", + // "description": "Find the tenancy unit for a service and consumer.\nThis method should not be used in producers' runtime path, for example\nfinding the tenant project number when creating VMs. 
Producers should\npersist the tenant project information after the project is created.", // "flatPath": "v1/services/{servicesId}/{servicesId1}/{servicesId2}/tenancyUnits", // "httpMethod": "GET", // "id": "serviceconsumermanagement.services.tenancyUnits.list", diff --git a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json index 0e4abe7c4..cbab22f7b 100644 --- a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json +++ b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-api.json @@ -291,7 +291,7 @@ } } }, - "revision": "20180303", + "revision": "20180312", "rootUrl": "https://servicecontrol.googleapis.com/", "schemas": { "AllocateInfo": { @@ -504,6 +504,7 @@ "NO_LOAS_PROJECT", "LOAS_PROJECT_DISABLED", "SECURITY_POLICY_VIOLATED", + "INVALID_CREDENTIAL", "NAMESPACE_LOOKUP_UNAVAILABLE", "SERVICE_STATUS_UNAVAILABLE", "BILLING_STATUS_UNAVAILABLE", @@ -538,6 +539,7 @@ "The consumer's LOAS role has no associated project.", "The consumer's LOAS project is not `ACTIVE` in LoquatV2.", "Request is not allowed as per security policies defined in Org Policy.", + "The credential in the request can not be verified.", "The backend server for looking up project id/number is unavailable.", "The backend server for checking service status is unavailable.", "The backend server for checking billing status is unavailable.", diff --git a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go index af0908956..eb5788dd9 100644 --- a/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go +++ b/vendor/google.golang.org/api/servicecontrol/v1/servicecontrol-gen.go @@ -470,6 +470,8 @@ type CheckError struct { // `ACTIVE` in LoquatV2. // "SECURITY_POLICY_VIOLATED" - Request is not allowed as per security // policies defined in Org Policy. 
+ // "INVALID_CREDENTIAL" - The credential in the request can not be + // verified. // "NAMESPACE_LOOKUP_UNAVAILABLE" - The backend server for looking up // project id/number is unavailable. // "SERVICE_STATUS_UNAVAILABLE" - The backend server for checking diff --git a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json index 68c18b18c..e3e9637fb 100644 --- a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json +++ b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-api.json @@ -18,7 +18,7 @@ } }, "basePath": "", - "baseUrl": "https://servicemanagement.googleapis.com/", + "baseUrl": "https://content-servicemanagement.googleapis.com/", "batchPath": "batch", "canonicalName": "Service Management", "description": "Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.", @@ -542,7 +542,7 @@ "configs": { "methods": { "create": { - "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.", + "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. 
The rest will be deleted\neventually.", "flatPath": "v1/services/{serviceName}/configs", "httpMethod": "POST", "id": "servicemanagement.services.configs.create", @@ -651,7 +651,7 @@ ] }, "submit": { - "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", + "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", "flatPath": "v1/services/{serviceName}/configs:submit", "httpMethod": "POST", "id": "servicemanagement.services.configs.submit", @@ -778,7 +778,7 @@ "rollouts": { "methods": { "create": { - "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOperation\u003cresponse: Rollout\u003e", + "description": "Creates a new service configuration rollout. 
Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. The rest will be deleted eventually.\n\nOperation\u003cresponse: Rollout\u003e", "flatPath": "v1/services/{serviceName}/rollouts", "httpMethod": "POST", "id": "servicemanagement.services.rollouts.create", @@ -887,8 +887,8 @@ } } }, - "revision": "20180223", - "rootUrl": "https://servicemanagement.googleapis.com/", + "revision": "20180317", + "rootUrl": "https://content-servicemanagement.googleapis.com/", "schemas": { "Advice": { "description": "Generated advice about this change, used for providing more\ninformation about how a change will affect the existing service.", @@ -1486,7 +1486,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. 
In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;--BETA: comment for BETA users --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -2787,10 +2787,6 @@ "usage": { "$ref": "Usage", "description": "Configuration controlling usage of this service." 
- }, - "visibility": { - "$ref": "Visibility", - "description": "API visibility configuration." } }, "type": "object" @@ -3120,35 +3116,6 @@ } }, "type": "object" - }, - "Visibility": { - "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", - "id": "Visibility", - "properties": { - "rules": { - "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "items": { - "$ref": "VisibilityRule" - }, - "type": "array" - } - }, - "type": "object" - }, - "VisibilityRule": { - "description": "A visibility rule provides visibility configuration for an individual API\nelement.", - "id": "VisibilityRule", - "properties": { - "restriction": { - "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had 
access to it through GOOGLE_INTERNAL.", - "type": "string" - }, - "selector": { - "description": "Selects methods, messages, fields, enums, etc. to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" - } - }, - "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go index af296a861..43237502b 100644 --- a/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go +++ b/vendor/google.golang.org/api/servicemanagement/v1/servicemanagement-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "servicemanagement:v1" const apiName = "servicemanagement" const apiVersion = "v1" -const basePath = "https://servicemanagement.googleapis.com/" +const basePath = "https://content-servicemanagement.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -1685,11 +1685,7 @@ func (s *DisableServiceRequest) MarshalJSON() ([]byte, error) { // Text can be excluded from doc using the following // notation: //
(-- internal comment --)
-// Comments can be made conditional using a visibility label. The -// below -// text will be only rendered if the `BETA` label is -// available: -//
(--BETA: comment for BETA users --)
+// // A few directives are available in documentation. Note that // directives must appear on a single line to be properly // identified. The `include` directive includes a markdown file from @@ -4599,9 +4595,6 @@ type Service struct { // Usage: Configuration controlling usage of this service. Usage *Usage `json:"usage,omitempty"` - // Visibility: API visibility configuration. - Visibility *Visibility `json:"visibility,omitempty"` - // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -5456,119 +5449,6 @@ func (s *UsageRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Visibility: `Visibility` defines restrictions for the visibility of -// service -// elements. Restrictions are specified using visibility labels -// (e.g., TRUSTED_TESTER) that are elsewhere linked to users and -// projects. -// -// Users and projects can have access to more than one visibility label. -// The -// effective visibility for multiple labels is the union of each -// label's -// elements, plus any unrestricted elements. -// -// If an element and its parents have no restrictions, visibility -// is -// unconditionally granted. -// -// Example: -// -// visibility: -// rules: -// - selector: google.calendar.Calendar.EnhancedSearch -// restriction: TRUSTED_TESTER -// - selector: google.calendar.Calendar.Delegate -// restriction: GOOGLE_INTERNAL -// -// Here, all methods are publicly visible except for the restricted -// methods -// EnhancedSearch and Delegate. -type Visibility struct { - // Rules: A list of visibility rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. - Rules []*VisibilityRule `json:"rules,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Rules") to - // unconditionally include in API requests. 
By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rules") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Visibility) MarshalJSON() ([]byte, error) { - type NoMethod Visibility - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VisibilityRule: A visibility rule provides visibility configuration -// for an individual API -// element. -type VisibilityRule struct { - // Restriction: A comma-separated list of visibility labels that apply - // to the `selector`. - // Any of the listed labels can be used to grant the visibility. - // - // If a rule has multiple labels, removing one of the labels but not all - // of - // them can break clients. - // - // Example: - // - // visibility: - // rules: - // - selector: google.calendar.Calendar.EnhancedSearch - // restriction: GOOGLE_INTERNAL, TRUSTED_TESTER - // - // Removing GOOGLE_INTERNAL from this restriction will break clients - // that - // rely on this method and only had access to it through - // GOOGLE_INTERNAL. - Restriction string `json:"restriction,omitempty"` - - // Selector: Selects methods, messages, fields, enums, etc. to which - // this rule applies. - // - // Refer to selector for syntax details. 
- Selector string `json:"selector,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Restriction") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Restriction") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VisibilityRule) MarshalJSON() ([]byte, error) { - type NoMethod VisibilityRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // method id "servicemanagement.operations.get": type OperationsGetCall struct { @@ -7720,8 +7600,15 @@ type ServicesConfigsCreateCall struct { // service. // This method only stores the service configuration. To roll out the // service -// configuration to backend systems please call +// configuration to backend systems please +// call // CreateServiceRollout. +// +// Only the 100 most recent service configurations and ones referenced +// by +// existing rollouts are kept for each service. The rest will be +// deleted +// eventually. 
func (r *ServicesConfigsService) Create(serviceName string, service *Service) *ServicesConfigsCreateCall { c := &ServicesConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.serviceName = serviceName @@ -7815,7 +7702,7 @@ func (c *ServicesConfigsCreateCall) Do(opts ...googleapi.CallOption) (*Service, } return ret, nil // { - // "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.", + // "description": "Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. The rest will be deleted\neventually.", // "flatPath": "v1/services/{serviceName}/configs", // "httpMethod": "POST", // "id": "servicemanagement.services.configs.create", @@ -8229,6 +8116,12 @@ type ServicesConfigsSubmitCall struct { // other services, // please call CreateServiceRollout. // +// Only the 100 most recent configuration sources and ones referenced +// by +// existing service configurtions are kept for each service. The rest +// will be +// deleted eventually. +// // Operation func (r *ServicesConfigsService) Submit(serviceName string, submitconfigsourcerequest *SubmitConfigSourceRequest) *ServicesConfigsSubmitCall { c := &ServicesConfigsSubmitCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -8323,7 +8216,7 @@ func (c *ServicesConfigsSubmitCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). 
This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", + // "description": "Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation\u003cresponse: SubmitConfigSourceResponse\u003e", // "flatPath": "v1/services/{serviceName}/configs:submit", // "httpMethod": "POST", // "id": "servicemanagement.services.configs.submit", @@ -8805,6 +8698,12 @@ type ServicesRolloutsCreateCall struct { // will // not be blocked by previous Rollouts. // +// Only the 100 most recent (in any state) and the last 10 successful +// (if not +// already part of the set of 100 most recent) rollouts are kept for +// each +// service. The rest will be deleted eventually. +// // Operation func (r *ServicesRolloutsService) Create(serviceName string, rollout *Rollout) *ServicesRolloutsCreateCall { c := &ServicesRolloutsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -8899,7 +8798,7 @@ func (c *ServicesRolloutsCreateCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. 
For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOperation\u003cresponse: Rollout\u003e", + // "description": "Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. The rest will be deleted eventually.\n\nOperation\u003cresponse: Rollout\u003e", // "flatPath": "v1/services/{serviceName}/rollouts", // "httpMethod": "POST", // "id": "servicemanagement.services.rollouts.create", diff --git a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json index 32fec511e..cbf772769 100644 --- a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json +++ b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-api.json @@ -201,7 +201,7 @@ ], "parameters": { "parent": { - "description": "Parent to enable services on.\n\nAn example name would be:\nprojects/123\n\nThe `BatchEnableServices` method currently only supports projects.", + "description": "Parent to enable services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).\n\nThe `BatchEnableServices` method currently only supports projects.", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ 
-230,7 +230,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com", + "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -259,7 +259,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com", + "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -288,7 +288,7 @@ ], "parameters": { "name": { - "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com", + "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", "location": "path", "pattern": 
"^[^/]+/[^/]+/services/[^/]+$", "required": true, @@ -330,7 +330,7 @@ "type": "string" }, "parent": { - "description": "Parent to search for services on.\n\nAn example name would be:\nprojects/123", + "description": "Parent to search for services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).", "location": "path", "pattern": "^[^/]+/[^/]+$", "required": true, @@ -349,7 +349,7 @@ } } }, - "revision": "20180308", + "revision": "20180322", "rootUrl": "https://serviceusage.googleapis.com/", "schemas": { "Api": { @@ -741,7 +741,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;--BETA: comment for BETA users --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1152,10 +1152,6 @@ "usage": { "$ref": "Usage", "description": "Configuration controlling usage of this service." 
- }, - "visibility": { - "$ref": "Visibility", - "description": "API visibility configuration." } }, "type": "object" @@ -2173,35 +2169,6 @@ } }, "type": "object" - }, - "Visibility": { - "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", - "id": "Visibility", - "properties": { - "rules": { - "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "items": { - "$ref": "VisibilityRule" - }, - "type": "array" - } - }, - "type": "object" - }, - "VisibilityRule": { - "description": "A visibility rule provides visibility configuration for an individual API\nelement.", - "id": "VisibilityRule", - "properties": { - "restriction": { - "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had 
access to it through GOOGLE_INTERNAL.", - "type": "string" - }, - "selector": { - "description": "Selects methods, messages, fields, enums, etc. to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" - } - }, - "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go index f2e171638..24018cd84 100644 --- a/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go +++ b/vendor/google.golang.org/api/serviceusage/v1beta1/serviceusage-gen.go @@ -1171,11 +1171,7 @@ type DisableServiceRequest struct { // Text can be excluded from doc using the following // notation: //
(-- internal comment --)
-// Comments can be made conditional using a visibility label. The -// below -// text will be only rendered if the `BETA` label is -// available: -//
(--BETA: comment for BETA users --)
+// // A few directives are available in documentation. Note that // directives must appear on a single line to be properly // identified. The `include` directive includes a markdown file from @@ -1749,9 +1745,6 @@ type GoogleApiService struct { // Usage: Configuration controlling usage of this service. Usage *Usage `json:"usage,omitempty"` - // Visibility: API visibility configuration. - Visibility *Visibility `json:"visibility,omitempty"` - // ForceSendFields is a list of field names (e.g. "Apis") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -4423,119 +4416,6 @@ func (s *UsageRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Visibility: `Visibility` defines restrictions for the visibility of -// service -// elements. Restrictions are specified using visibility labels -// (e.g., TRUSTED_TESTER) that are elsewhere linked to users and -// projects. -// -// Users and projects can have access to more than one visibility label. -// The -// effective visibility for multiple labels is the union of each -// label's -// elements, plus any unrestricted elements. -// -// If an element and its parents have no restrictions, visibility -// is -// unconditionally granted. -// -// Example: -// -// visibility: -// rules: -// - selector: google.calendar.Calendar.EnhancedSearch -// restriction: TRUSTED_TESTER -// - selector: google.calendar.Calendar.Delegate -// restriction: GOOGLE_INTERNAL -// -// Here, all methods are publicly visible except for the restricted -// methods -// EnhancedSearch and Delegate. -type Visibility struct { - // Rules: A list of visibility rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. - Rules []*VisibilityRule `json:"rules,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"Rules") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rules") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Visibility) MarshalJSON() ([]byte, error) { - type NoMethod Visibility - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VisibilityRule: A visibility rule provides visibility configuration -// for an individual API -// element. -type VisibilityRule struct { - // Restriction: A comma-separated list of visibility labels that apply - // to the `selector`. - // Any of the listed labels can be used to grant the visibility. - // - // If a rule has multiple labels, removing one of the labels but not all - // of - // them can break clients. - // - // Example: - // - // visibility: - // rules: - // - selector: google.calendar.Calendar.EnhancedSearch - // restriction: GOOGLE_INTERNAL, TRUSTED_TESTER - // - // Removing GOOGLE_INTERNAL from this restriction will break clients - // that - // rely on this method and only had access to it through - // GOOGLE_INTERNAL. - Restriction string `json:"restriction,omitempty"` - - // Selector: Selects methods, messages, fields, enums, etc. to which - // this rule applies. - // - // Refer to selector for syntax details. 
- Selector string `json:"selector,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Restriction") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Restriction") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VisibilityRule) MarshalJSON() ([]byte, error) { - type NoMethod VisibilityRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // method id "serviceusage.operations.get": type OperationsGetCall struct { @@ -5011,7 +4891,7 @@ func (c *ServicesBatchEnableCall) Do(opts ...googleapi.CallOption) (*Operation, // ], // "parameters": { // "parent": { - // "description": "Parent to enable services on.\n\nAn example name would be:\nprojects/123\n\nThe `BatchEnableServices` method currently only supports projects.", + // "description": "Parent to enable services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).\n\nThe `BatchEnableServices` method currently only supports projects.", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, @@ -5159,7 +5039,7 @@ func (c *ServicesDisableCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "name": { - // "description": 
"Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com", + // "description": "Name of the consumer and service to disable the service on.\n\nThe enable and disable methods currently only support projects.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -5298,7 +5178,7 @@ func (c *ServicesEnableCall) Do(opts ...googleapi.CallOption) (*Operation, error // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com", + // "description": "Name of the consumer and service to enable the service on.\n\nThe `EnableService` and `DisableService` methods currently only support\nprojects.\n\nEnabling a service requires that the service is public or is shared with\nthe user enabling the service.\n\nAn example name would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -5442,7 +5322,7 @@ func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*Service, error) { // ], // "parameters": { // "name": { - // "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name would be:\nprojects/123/services/serviceusage.googleapis.com", + // "description": "Name of the consumer and service to get the `ConsumerState` for.\n\nAn example name 
would be:\n`projects/123/services/serviceusage.googleapis.com`\nwhere `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+/services/[^/]+$", // "required": true, @@ -5634,7 +5514,7 @@ func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesRespon // "type": "string" // }, // "parent": { - // "description": "Parent to search for services on.\n\nAn example name would be:\nprojects/123", + // "description": "Parent to search for services on.\n\nAn example name would be:\n`projects/123`\nwhere `123` is the project number (not project ID).", // "location": "path", // "pattern": "^[^/]+/[^/]+$", // "required": true, diff --git a/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json index 3749e7779..c36c5e61a 100644 --- a/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json +++ b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-api.json @@ -15,7 +15,7 @@ } }, "basePath": "", - "baseUrl": "https://serviceuser.googleapis.com/", + "baseUrl": "https://content-serviceuser.googleapis.com/", "batchPath": "batch", "canonicalName": "Service User", "description": "Enables services that service consumers want to use on Google Cloud Platform, lists the available or enabled services, or disables services that service consumers no longer use.", @@ -256,8 +256,8 @@ } } }, - "revision": "20180223", - "rootUrl": "https://serviceuser.googleapis.com/", + "revision": "20180316", + "rootUrl": "https://content-serviceuser.googleapis.com/", "schemas": { "Api": { "description": "Api is a light-weight descriptor for an API Interface.\n\nInterfaces are also described as \"protocol buffer services\" in some contexts,\nsuch as by the \"service\" keyword in a .proto file, but they are different\nfrom API Services, which represent a concrete implementation of an interface\nas opposed to simply a description of methods and bindings. 
They are also\nsometimes simply referred to as \"APIs\" in other contexts, such as the name of\nthis message itself. See https://cloud.google.com/apis/design/glossary for\ndetailed terminology.", @@ -619,7 +619,7 @@ "type": "object" }, "Documentation": { - "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nComments can be made conditional using a visibility label. The below\ntext will be only rendered if the `BETA` label is available:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;--BETA: comment for BETA users --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. 
If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", + "description": "`Documentation` provides the information for describing a service.\n\nExample:\n\u003cpre\u003e\u003ccode\u003edocumentation:\n summary: \u003e\n The Google Calendar API gives access\n to most calendar features.\n pages:\n - name: Overview\n content: \u0026#40;== include google/foo/overview.md ==\u0026#41;\n - name: Tutorial\n content: \u0026#40;== include google/foo/tutorial.md ==\u0026#41;\n subpages;\n - name: Java\n content: \u0026#40;== include google/foo/tutorial_java.md ==\u0026#41;\n rules:\n - selector: google.calendar.Calendar.Get\n description: \u003e\n ...\n - selector: google.calendar.Calendar.Put\n description: \u003e\n ...\n\u003c/code\u003e\u003c/pre\u003e\nDocumentation is provided in markdown syntax. In addition to\nstandard markdown features, definition lists, tables and fenced\ncode blocks are supported. 
Section headers can be provided and are\ninterpreted relative to the section nesting of the context where\na documentation fragment is embedded.\n\nDocumentation from the IDL is merged with documentation defined\nvia the config at normalization time, where documentation provided\nby config rules overrides IDL provided.\n\nA number of constructs specific to the API platform are supported\nin documentation text.\n\nIn order to reference a proto element, the following\nnotation can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;fully.qualified.proto.name]\u0026#91;]\u003c/code\u003e\u003c/pre\u003e\nTo override the display text used for the link, this can be used:\n\u003cpre\u003e\u003ccode\u003e\u0026#91;display text]\u0026#91;fully.qualified.proto.name]\u003c/code\u003e\u003c/pre\u003e\nText can be excluded from doc using the following notation:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;-- internal comment --\u0026#41;\u003c/code\u003e\u003c/pre\u003e\n\nA few directives are available in documentation. Note that\ndirectives must appear on a single line to be properly\nidentified. The `include` directive includes a markdown file from\nan external source:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== include path/to/file ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe `resource_for` directive marks a message to be the resource of\na collection in REST view. If it is not specified, tools attempt\nto infer the resource from the operations in a collection:\n\u003cpre\u003e\u003ccode\u003e\u0026#40;== resource_for v1.shelves.books ==\u0026#41;\u003c/code\u003e\u003c/pre\u003e\nThe directive `suppress_warning` does not directly affect documentation\nand is documented together with service config validation.", "id": "Documentation", "properties": { "documentationRootUrl": { @@ -1736,10 +1736,6 @@ "usage": { "$ref": "Usage", "description": "Configuration controlling usage of this service." 
- }, - "visibility": { - "$ref": "Visibility", - "description": "API visibility configuration." } }, "type": "object" @@ -1973,35 +1969,6 @@ } }, "type": "object" - }, - "Visibility": { - "description": "`Visibility` defines restrictions for the visibility of service\nelements. Restrictions are specified using visibility labels\n(e.g., TRUSTED_TESTER) that are elsewhere linked to users and projects.\n\nUsers and projects can have access to more than one visibility label. The\neffective visibility for multiple labels is the union of each label's\nelements, plus any unrestricted elements.\n\nIf an element and its parents have no restrictions, visibility is\nunconditionally granted.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: TRUSTED_TESTER\n - selector: google.calendar.Calendar.Delegate\n restriction: GOOGLE_INTERNAL\n\nHere, all methods are publicly visible except for the restricted methods\nEnhancedSearch and Delegate.", - "id": "Visibility", - "properties": { - "rules": { - "description": "A list of visibility rules that apply to individual API elements.\n\n**NOTE:** All service configuration rules follow \"last one wins\" order.", - "items": { - "$ref": "VisibilityRule" - }, - "type": "array" - } - }, - "type": "object" - }, - "VisibilityRule": { - "description": "A visibility rule provides visibility configuration for an individual API\nelement.", - "id": "VisibilityRule", - "properties": { - "restriction": { - "description": "A comma-separated list of visibility labels that apply to the `selector`.\nAny of the listed labels can be used to grant the visibility.\n\nIf a rule has multiple labels, removing one of the labels but not all of\nthem can break clients.\n\nExample:\n\n visibility:\n rules:\n - selector: google.calendar.Calendar.EnhancedSearch\n restriction: GOOGLE_INTERNAL, TRUSTED_TESTER\n\nRemoving GOOGLE_INTERNAL from this restriction will break clients that\nrely on this method and only had 
access to it through GOOGLE_INTERNAL.", - "type": "string" - }, - "selector": { - "description": "Selects methods, messages, fields, enums, etc. to which this rule applies.\n\nRefer to selector for syntax details.", - "type": "string" - } - }, - "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go index 834b28a10..5d764d7da 100644 --- a/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go +++ b/vendor/google.golang.org/api/serviceuser/v1/serviceuser-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "serviceuser:v1" const apiName = "serviceuser" const apiVersion = "v1" -const basePath = "https://serviceuser.googleapis.com/" +const basePath = "https://content-serviceuser.googleapis.com/" // OAuth2 scopes used by this API. const ( @@ -1073,11 +1073,7 @@ type DisableServiceRequest struct { // Text can be excluded from doc using the following // notation: //
(-- internal comment --)
-// Comments can be made conditional using a visibility label. The -// below -// text will be only rendered if the `BETA` label is -// available: -//
(--BETA: comment for BETA users --)
+// // A few directives are available in documentation. Note that // directives must appear on a single line to be properly // identified. The `include` directive includes a markdown file from @@ -3598,9 +3594,6 @@ type Service struct { // Usage: Configuration controlling usage of this service. Usage *Usage `json:"usage,omitempty"` - // Visibility: API visibility configuration. - Visibility *Visibility `json:"visibility,omitempty"` - // ForceSendFields is a list of field names (e.g. "Apis") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -4182,119 +4175,6 @@ func (s *UsageRule) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Visibility: `Visibility` defines restrictions for the visibility of -// service -// elements. Restrictions are specified using visibility labels -// (e.g., TRUSTED_TESTER) that are elsewhere linked to users and -// projects. -// -// Users and projects can have access to more than one visibility label. -// The -// effective visibility for multiple labels is the union of each -// label's -// elements, plus any unrestricted elements. -// -// If an element and its parents have no restrictions, visibility -// is -// unconditionally granted. -// -// Example: -// -// visibility: -// rules: -// - selector: google.calendar.Calendar.EnhancedSearch -// restriction: TRUSTED_TESTER -// - selector: google.calendar.Calendar.Delegate -// restriction: GOOGLE_INTERNAL -// -// Here, all methods are publicly visible except for the restricted -// methods -// EnhancedSearch and Delegate. -type Visibility struct { - // Rules: A list of visibility rules that apply to individual API - // elements. - // - // **NOTE:** All service configuration rules follow "last one wins" - // order. - Rules []*VisibilityRule `json:"rules,omitempty"` - - // ForceSendFields is a list of field names (e.g. 
"Rules") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Rules") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *Visibility) MarshalJSON() ([]byte, error) { - type NoMethod Visibility - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// VisibilityRule: A visibility rule provides visibility configuration -// for an individual API -// element. -type VisibilityRule struct { - // Restriction: A comma-separated list of visibility labels that apply - // to the `selector`. - // Any of the listed labels can be used to grant the visibility. - // - // If a rule has multiple labels, removing one of the labels but not all - // of - // them can break clients. - // - // Example: - // - // visibility: - // rules: - // - selector: google.calendar.Calendar.EnhancedSearch - // restriction: GOOGLE_INTERNAL, TRUSTED_TESTER - // - // Removing GOOGLE_INTERNAL from this restriction will break clients - // that - // rely on this method and only had access to it through - // GOOGLE_INTERNAL. - Restriction string `json:"restriction,omitempty"` - - // Selector: Selects methods, messages, fields, enums, etc. to which - // this rule applies. - // - // Refer to selector for syntax details. 
- Selector string `json:"selector,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Restriction") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Restriction") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *VisibilityRule) MarshalJSON() ([]byte, error) { - type NoMethod VisibilityRule - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - // method id "serviceuser.projects.services.disable": type ProjectsServicesDisableCall struct { diff --git a/vendor/google.golang.org/api/sheets/v4/sheets-api.json b/vendor/google.golang.org/api/sheets/v4/sheets-api.json index 596e59df5..92bae3a88 100644 --- a/vendor/google.golang.org/api/sheets/v4/sheets-api.json +++ b/vendor/google.golang.org/api/sheets/v4/sheets-api.json @@ -819,7 +819,7 @@ } } }, - "revision": "20180306", + "revision": "20180313", "rootUrl": "https://sheets.googleapis.com/", "schemas": { "AddBandingRequest": { @@ -1323,7 +1323,7 @@ "type": "array" }, "stackedType": { - "description": "The stacked type for charts that support vertical stacking.\nApplies to Area, Bar, Column, and Stepped Area charts.", + "description": "The stacked type for charts that support vertical stacking.\nApplies to Area, Bar, Column, 
Combo, and Stepped Area charts.", "enum": [ "BASIC_CHART_STACKED_TYPE_UNSPECIFIED", "NOT_STACKED", @@ -2334,6 +2334,10 @@ "$ref": "TextPosition", "description": "The title text position.\nThis field is optional." }, + "treemapChart": { + "$ref": "TreemapChartSpec", + "description": "A treemap chart specification." + }, "waterfallChart": { "$ref": "WaterfallChartSpec", "description": "A waterfall chart specification." @@ -5155,6 +5159,88 @@ }, "type": "object" }, + "TreemapChartColorScale": { + "description": "A color scale for a treemap chart.", + "id": "TreemapChartColorScale", + "properties": { + "maxValueColor": { + "$ref": "Color", + "description": "The background color for cells with a color value greater than or equal\nto maxValue. Defaults to #109618 if not\nspecified." + }, + "midValueColor": { + "$ref": "Color", + "description": "The background color for cells with a color value at the midpoint between\nminValue and\nmaxValue. Defaults to #efe6dc if not\nspecified." + }, + "minValueColor": { + "$ref": "Color", + "description": "The background color for cells with a color value less than or equal to\nminValue. Defaults to #dc3912 if not\nspecified." + }, + "noDataColor": { + "$ref": "Color", + "description": "The background color for cells that have no color data associated with\nthem. Defaults to #000000 if not specified." + } + }, + "type": "object" + }, + "TreemapChartSpec": { + "description": "A \u003ca href=\"/chart/interactive/docs/gallery/treemap\"\u003eTreemap chart\u003c/a\u003e.", + "id": "TreemapChartSpec", + "properties": { + "colorData": { + "$ref": "ChartData", + "description": "The data that determines the background color of each treemap data cell.\nThis field is optional. If not specified, size_data will be used to\ndetermine background colors. If specified, the data is expected to be\nnumeric. color_scale will determine how the values in this data map to\ndata cell background colors." 
+ }, + "colorScale": { + "$ref": "TreemapChartColorScale", + "description": "The color scale for data cells in the treemap chart. Data cells are\nassigned colors based on their color values. These color values come from\ncolor_data, or from size_data if color_data is not specified.\nCells with color values less than or equal to min_value will\nhave minValueColor as their\nbackground color. Cells with color values greater than or equal to\nmax_value will have\nmaxValueColor as their background\ncolor. Cells with color values between min_value and max_value will\nhave background colors on a gradient between\nminValueColor and\nmaxValueColor, the midpoint of\nthe gradient being midValueColor.\nCells with missing or non-numeric color values will have\nnoDataColor as their background\ncolor." + }, + "headerColor": { + "$ref": "Color", + "description": "The background color for header cells." + }, + "hideTooltips": { + "description": "True to hide tooltips.", + "type": "boolean" + }, + "hintedLevels": { + "description": "The number of additional data levels beyond the labeled levels to be shown\non the treemap chart. These levels are not interactive and are shown\nwithout their labels. Defaults to 0 if not specified.", + "format": "int32", + "type": "integer" + }, + "labels": { + "$ref": "ChartData", + "description": "The data that contains the treemap cell labels." + }, + "levels": { + "description": "The number of data levels to show on the treemap chart. These levels are\ninteractive and are shown with their labels. Defaults to 2 if not\nspecified.", + "format": "int32", + "type": "integer" + }, + "maxValue": { + "description": "The maximum possible data value. Cells with values greater than this will\nhave the same color as cells with this value. 
If not specified, defaults\nto the actual maximum value from color_data, or the maximum value from\nsize_data if color_data is not specified.", + "format": "double", + "type": "number" + }, + "minValue": { + "description": "The minimum possible data value. Cells with values less than this will\nhave the same color as cells with this value. If not specified, defaults\nto the actual minimum value from color_data, or the minimum value from\nsize_data if color_data is not specified.", + "format": "double", + "type": "number" + }, + "parentLabels": { + "$ref": "ChartData", + "description": "The data the contains the treemap cells' parent labels." + }, + "sizeData": { + "$ref": "ChartData", + "description": "The data that determines the size of each treemap data cell. This data is\nexpected to be numeric. The cells corresponding to non-numeric or missing\ndata will not be rendered. If color_data is not specified, this data\nwill be used to determine data cell background colors as well." + }, + "textFormat": { + "$ref": "TextFormat", + "description": "The text format for all labels on the chart." + } + }, + "type": "object" + }, "UnmergeCellsRequest": { "description": "Unmerges cells in the given range.", "id": "UnmergeCellsRequest", diff --git a/vendor/google.golang.org/api/sheets/v4/sheets-gen.go b/vendor/google.golang.org/api/sheets/v4/sheets-gen.go index 0f6165556..1a9761367 100644 --- a/vendor/google.golang.org/api/sheets/v4/sheets-gen.go +++ b/vendor/google.golang.org/api/sheets/v4/sheets-gen.go @@ -1114,7 +1114,7 @@ type BasicChartSpec struct { // StackedType: The stacked type for charts that support vertical // stacking. - // Applies to Area, Bar, Column, and Stepped Area charts. + // Applies to Area, Bar, Column, Combo, and Stepped Area charts. // // Possible values: // "BASIC_CHART_STACKED_TYPE_UNSPECIFIED" - Default value, do not use. @@ -2895,6 +2895,9 @@ type ChartSpec struct { // This field is optional. 
TitleTextPosition *TextPosition `json:"titleTextPosition,omitempty"` + // TreemapChart: A treemap chart specification. + TreemapChart *TreemapChartSpec `json:"treemapChart,omitempty"` + // WaterfallChart: A waterfall chart specification. WaterfallChart *WaterfallChartSpec `json:"waterfallChart,omitempty"` @@ -7888,6 +7891,186 @@ func (s *TextToColumnsRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// TreemapChartColorScale: A color scale for a treemap chart. +type TreemapChartColorScale struct { + // MaxValueColor: The background color for cells with a color value + // greater than or equal + // to maxValue. Defaults to #109618 if not + // specified. + MaxValueColor *Color `json:"maxValueColor,omitempty"` + + // MidValueColor: The background color for cells with a color value at + // the midpoint between + // minValue and + // maxValue. Defaults to #efe6dc if not + // specified. + MidValueColor *Color `json:"midValueColor,omitempty"` + + // MinValueColor: The background color for cells with a color value less + // than or equal to + // minValue. Defaults to #dc3912 if not + // specified. + MinValueColor *Color `json:"minValueColor,omitempty"` + + // NoDataColor: The background color for cells that have no color data + // associated with + // them. Defaults to #000000 if not specified. + NoDataColor *Color `json:"noDataColor,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxValueColor") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"MaxValueColor") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TreemapChartColorScale) MarshalJSON() ([]byte, error) { + type NoMethod TreemapChartColorScale + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// TreemapChartSpec: A Treemap chart. +type TreemapChartSpec struct { + // ColorData: The data that determines the background color of each + // treemap data cell. + // This field is optional. If not specified, size_data will be used + // to + // determine background colors. If specified, the data is expected to + // be + // numeric. color_scale will determine how the values in this data map + // to + // data cell background colors. + ColorData *ChartData `json:"colorData,omitempty"` + + // ColorScale: The color scale for data cells in the treemap chart. Data + // cells are + // assigned colors based on their color values. These color values come + // from + // color_data, or from size_data if color_data is not specified. + // Cells with color values less than or equal to min_value will + // have minValueColor as their + // background color. Cells with color values greater than or equal + // to + // max_value will have + // maxValueColor as their background + // color. Cells with color values between min_value and max_value + // will + // have background colors on a gradient between + // minValueColor and + // maxValueColor, the midpoint of + // the gradient being midValueColor. + // Cells with missing or non-numeric color values will have + // noDataColor as their background + // color. 
+ ColorScale *TreemapChartColorScale `json:"colorScale,omitempty"` + + // HeaderColor: The background color for header cells. + HeaderColor *Color `json:"headerColor,omitempty"` + + // HideTooltips: True to hide tooltips. + HideTooltips bool `json:"hideTooltips,omitempty"` + + // HintedLevels: The number of additional data levels beyond the labeled + // levels to be shown + // on the treemap chart. These levels are not interactive and are + // shown + // without their labels. Defaults to 0 if not specified. + HintedLevels int64 `json:"hintedLevels,omitempty"` + + // Labels: The data that contains the treemap cell labels. + Labels *ChartData `json:"labels,omitempty"` + + // Levels: The number of data levels to show on the treemap chart. These + // levels are + // interactive and are shown with their labels. Defaults to 2 if + // not + // specified. + Levels int64 `json:"levels,omitempty"` + + // MaxValue: The maximum possible data value. Cells with values greater + // than this will + // have the same color as cells with this value. If not specified, + // defaults + // to the actual maximum value from color_data, or the maximum value + // from + // size_data if color_data is not specified. + MaxValue float64 `json:"maxValue,omitempty"` + + // MinValue: The minimum possible data value. Cells with values less + // than this will + // have the same color as cells with this value. If not specified, + // defaults + // to the actual minimum value from color_data, or the minimum value + // from + // size_data if color_data is not specified. + MinValue float64 `json:"minValue,omitempty"` + + // ParentLabels: The data the contains the treemap cells' parent labels. + ParentLabels *ChartData `json:"parentLabels,omitempty"` + + // SizeData: The data that determines the size of each treemap data + // cell. This data is + // expected to be numeric. The cells corresponding to non-numeric or + // missing + // data will not be rendered. 
If color_data is not specified, this + // data + // will be used to determine data cell background colors as well. + SizeData *ChartData `json:"sizeData,omitempty"` + + // TextFormat: The text format for all labels on the chart. + TextFormat *TextFormat `json:"textFormat,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ColorData") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ColorData") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TreemapChartSpec) MarshalJSON() ([]byte, error) { + type NoMethod TreemapChartSpec + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *TreemapChartSpec) UnmarshalJSON(data []byte) error { + type NoMethod TreemapChartSpec + var s1 struct { + MaxValue gensupport.JSONFloat64 `json:"maxValue"` + MinValue gensupport.JSONFloat64 `json:"minValue"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.MaxValue = float64(s1.MaxValue) + s.MinValue = float64(s1.MinValue) + return nil +} + // UnmergeCellsRequest: Unmerges cells in the given range. type UnmergeCellsRequest struct { // Range: The range within which all cells should be unmerged. 
diff --git a/vendor/google.golang.org/api/slides/v1/slides-api.json b/vendor/google.golang.org/api/slides/v1/slides-api.json index 0bb595561..dab840b9d 100644 --- a/vendor/google.golang.org/api/slides/v1/slides-api.json +++ b/vendor/google.golang.org/api/slides/v1/slides-api.json @@ -311,7 +311,7 @@ } } }, - "revision": "20180301", + "revision": "20180312", "rootUrl": "https://slides.googleapis.com/", "schemas": { "AffineTransform": { @@ -4315,7 +4315,7 @@ "type": "boolean" }, "end": { - "description": "The time at which to end playback, measured in seconds from the beginning\nof the video.\nIf set, the end time should be after the start time.\nIf not set or if you set this to a value that exceeds the video duration,\nthe video will be played until its end.", + "description": "The time at which to end playback, measured in seconds from the beginning\nof the video.\nIf set, the end time should be after the start time.\nIf not set or if you set this to a value that exceeds the video's length,\nthe video will be played until its end.", "format": "uint32", "type": "integer" }, diff --git a/vendor/google.golang.org/api/slides/v1/slides-gen.go b/vendor/google.golang.org/api/slides/v1/slides-gen.go index 32ab99463..e01fc7117 100644 --- a/vendor/google.golang.org/api/slides/v1/slides-gen.go +++ b/vendor/google.golang.org/api/slides/v1/slides-gen.go @@ -7524,8 +7524,8 @@ type VideoProperties struct { // beginning // of the video. // If set, the end time should be after the start time. - // If not set or if you set this to a value that exceeds the video - // duration, + // If not set or if you set this to a value that exceeds the video's + // length, // the video will be played until its end. 
End int64 `json:"end,omitempty"` diff --git a/vendor/google.golang.org/api/speech/v1/speech-api.json b/vendor/google.golang.org/api/speech/v1/speech-api.json index 68271fff0..1ea80cd52 100644 --- a/vendor/google.golang.org/api/speech/v1/speech-api.json +++ b/vendor/google.golang.org/api/speech/v1/speech-api.json @@ -15,6 +15,7 @@ "description": "Converts audio to text by applying powerful neural network models.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/speech/", + "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" @@ -184,7 +185,7 @@ } } }, - "revision": "20180227", + "revision": "20180312", "rootUrl": "https://speech.googleapis.com/", "schemas": { "LongRunningRecognizeRequest": { @@ -443,7 +444,7 @@ } }, "servicePath": "", - "title": "Google Cloud Speech API", + "title": "Cloud Speech API", "version": "v1", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/speech/v1/speech-gen.go b/vendor/google.golang.org/api/speech/v1/speech-gen.go index 6eb494db4..c71db1922 100644 --- a/vendor/google.golang.org/api/speech/v1/speech-gen.go +++ b/vendor/google.golang.org/api/speech/v1/speech-gen.go @@ -1,4 +1,4 @@ -// Package speech provides access to the Google Cloud Speech API. +// Package speech provides access to the Cloud Speech API. 
// // See https://cloud.google.com/speech/ // diff --git a/vendor/google.golang.org/api/speech/v1beta1/speech-api.json b/vendor/google.golang.org/api/speech/v1beta1/speech-api.json index 768f4fabf..0678b8884 100644 --- a/vendor/google.golang.org/api/speech/v1beta1/speech-api.json +++ b/vendor/google.golang.org/api/speech/v1beta1/speech-api.json @@ -15,6 +15,7 @@ "description": "Converts audio to text by applying powerful neural network models.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/speech/", + "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", "x32": "http://www.google.com/images/icons/product/search-32.gif" @@ -184,7 +185,7 @@ } } }, - "revision": "20180130", + "revision": "20180312", "rootUrl": "https://speech.googleapis.com/", "schemas": { "AsyncRecognizeRequest": { @@ -404,7 +405,7 @@ } }, "servicePath": "", - "title": "Google Cloud Speech API", + "title": "Cloud Speech API", "version": "v1beta1", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go b/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go index 9f56ca477..132b22695 100644 --- a/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go +++ b/vendor/google.golang.org/api/speech/v1beta1/speech-gen.go @@ -1,4 +1,4 @@ -// Package speech provides access to the Google Cloud Speech API. +// Package speech provides access to the Cloud Speech API. 
// // See https://cloud.google.com/speech/ // diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json index 0fff227da..b2c914010 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -18,7 +18,7 @@ "description": "Creates and configures Cloud SQL instances, which provide fully-managed MySQL databases.", "discoveryVersion": "v1", "documentationLink": "https://cloud.google.com/sql/docs/reference/latest", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/mSPyWksLXfT4Y1yNDG7t3sTwHiI\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/FsDAXvP1EaK5M44tzgwKdZouc_s\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -1524,7 +1524,7 @@ } } }, - "revision": "20180227", + "revision": "20180314", "rootUrl": "https://www.googleapis.com/", "schemas": { "AclEntry": { @@ -1570,7 +1570,7 @@ "type": "string" }, "replicationLogArchivingEnabled": { - "description": "Whether replication log archiving is enabled. Replication log archiving is required for the point-in-time recovery (PITR) feature. PostgreSQL instances only.", + "description": "Reserved for future use.", "type": "boolean" }, "startTime": { @@ -1703,7 +1703,7 @@ "type": "string" }, "pitrTimestampMs": { - "description": "The epoch timestamp, in milliseconds, of the time to which a point-in-time recovery (PITR) is performed. PostgreSQL instances only. 
For MySQL instances, use the binLogCoordinates property.", + "description": "Reserved for future use.", "format": "int64", "type": "string" } diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go index c57aa2498..38abb7a7a 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -222,9 +222,7 @@ type BackupConfiguration struct { // Kind: This is always sql#backupConfiguration. Kind string `json:"kind,omitempty"` - // ReplicationLogArchivingEnabled: Whether replication log archiving is - // enabled. Replication log archiving is required for the point-in-time - // recovery (PITR) feature. PostgreSQL instances only. + // ReplicationLogArchivingEnabled: Reserved for future use. ReplicationLogArchivingEnabled bool `json:"replicationLogArchivingEnabled,omitempty"` // StartTime: Start time for the daily backup configuration in UTC @@ -419,10 +417,7 @@ type CloneContext struct { // Kind: This is always sql#cloneContext. Kind string `json:"kind,omitempty"` - // PitrTimestampMs: The epoch timestamp, in milliseconds, of the time to - // which a point-in-time recovery (PITR) is performed. PostgreSQL - // instances only. For MySQL instances, use the binLogCoordinates - // property. + // PitrTimestampMs: Reserved for future use. PitrTimestampMs int64 `json:"pitrTimestampMs,omitempty,string"` // ForceSendFields is a list of field names (e.g. 
"BinLogCoordinates") diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 9531065d5..2d23f028b 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/jLupXEh5MvYeA2ibX_aBxLuxU28\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/nD7tyZqYOGFELa4QRBOZJ8raFKA\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -219,7 +219,7 @@ ] }, "patch": { - "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + "description": "Patches an ACL entry on the specified bucket.", "httpMethod": "PATCH", "id": "storage.bucketAccessControls.patch", "parameterOrder": [ @@ -1050,7 +1050,7 @@ ] }, "patch": { - "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + "description": "Patches a default object ACL entry on the specified bucket.", "httpMethod": "PATCH", "id": "storage.defaultObjectAccessControls.patch", "parameterOrder": [ @@ -1456,7 +1456,7 @@ ] }, "patch": { - "description": "Updates an ACL entry on the specified object. 
This method supports patch semantics.", + "description": "Patches an ACL entry on the specified object.", "httpMethod": "PATCH", "id": "storage.objectAccessControls.patch", "parameterOrder": [ @@ -2756,7 +2756,7 @@ } } }, - "revision": "20180118", + "revision": "20180305", "rootUrl": "https://www.googleapis.com/", "schemas": { "Bucket": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 0db775311..36846eb54 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2513,8 +2513,7 @@ type BucketAccessControlsPatchCall struct { header_ http.Header } -// Patch: Updates an ACL entry on the specified bucket. This method -// supports patch semantics. +// Patch: Patches an ACL entry on the specified bucket. func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -2617,7 +2616,7 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke } return ret, nil // { - // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.", + // "description": "Patches an ACL entry on the specified bucket.", // "httpMethod": "PATCH", // "id": "storage.bucketAccessControls.patch", // "parameterOrder": [ @@ -5524,8 +5523,7 @@ type DefaultObjectAccessControlsPatchCall struct { header_ http.Header } -// Patch: Updates a default object ACL entry on the specified bucket. -// This method supports patch semantics. +// Patch: Patches a default object ACL entry on the specified bucket. 
func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -5628,7 +5626,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.", + // "description": "Patches a default object ACL entry on the specified bucket.", // "httpMethod": "PATCH", // "id": "storage.defaultObjectAccessControls.patch", // "parameterOrder": [ @@ -7093,8 +7091,7 @@ type ObjectAccessControlsPatchCall struct { header_ http.Header } -// Patch: Updates an ACL entry on the specified object. This method -// supports patch semantics. +// Patch: Patches an ACL entry on the specified object. func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -7207,7 +7204,7 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec } return ret, nil // { - // "description": "Updates an ACL entry on the specified object. 
This method supports patch semantics.", + // "description": "Patches an ACL entry on the specified object.", // "httpMethod": "PATCH", // "id": "storage.objectAccessControls.patch", // "parameterOrder": [ diff --git a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json index 0ebf00016..3b8f85b27 100644 --- a/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json +++ b/vendor/google.golang.org/api/storagetransfer/v1/storagetransfer-api.json @@ -430,7 +430,7 @@ } } }, - "revision": "20180228", + "revision": "20180319", "rootUrl": "https://storagetransfer.googleapis.com/", "schemas": { "AwsAccessKey": { diff --git a/vendor/google.golang.org/api/testing/v1/testing-api.json b/vendor/google.golang.org/api/testing/v1/testing-api.json index 117a7ab67..80f1e64d9 100644 --- a/vendor/google.golang.org/api/testing/v1/testing-api.json +++ b/vendor/google.golang.org/api/testing/v1/testing-api.json @@ -281,7 +281,7 @@ } } }, - "revision": "20180307", + "revision": "20180316", "rootUrl": "https://testing.googleapis.com/", "schemas": { "Account": { @@ -513,6 +513,10 @@ "type": "string" }, "type": "array" + }, + "videoRecordingNotSupported": { + "description": "True if and only if tests with this model DO NOT have video output.\nSee also TestSpecification.disable_video_recording\n@OutputOnly", + "type": "boolean" } }, "type": "object" @@ -1150,6 +1154,10 @@ "type": "string" }, "type": "array" + }, + "videoRecordingDisabled": { + "description": "Indicates that video will not be recorded for this execution either because\nthe user chose to disable it or the device does not support it.\nSee AndroidModel.video_recording_not_supported\n@OutputOnly", + "type": "boolean" } }, "type": "object" diff --git a/vendor/google.golang.org/api/testing/v1/testing-gen.go b/vendor/google.golang.org/api/testing/v1/testing-gen.go index 58694a80e..9ed6c6d63 100644 --- 
a/vendor/google.golang.org/api/testing/v1/testing-gen.go +++ b/vendor/google.golang.org/api/testing/v1/testing-gen.go @@ -512,6 +512,12 @@ type AndroidModel struct { // Examples: "default", "preview", "deprecated" Tags []string `json:"tags,omitempty"` + // VideoRecordingNotSupported: True if and only if tests with this model + // DO NOT have video output. + // See also TestSpecification.disable_video_recording + // @OutputOnly + VideoRecordingNotSupported bool `json:"videoRecordingNotSupported,omitempty"` + // ForceSendFields is a list of field names (e.g. "Brand") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1842,6 +1848,13 @@ type TestDetails struct { // @OutputOnly ProgressMessages []string `json:"progressMessages,omitempty"` + // VideoRecordingDisabled: Indicates that video will not be recorded for + // this execution either because + // the user chose to disable it or the device does not support it. + // See AndroidModel.video_recording_not_supported + // @OutputOnly + VideoRecordingDisabled bool `json:"videoRecordingDisabled,omitempty"` + // ForceSendFields is a list of field names (e.g. "ErrorMessage") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, diff --git a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json index 18adba83e..7c9f5e1ae 100644 --- a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json +++ b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-api.json @@ -15,7 +15,7 @@ "description": "Reads and publishes results from Firebase Test Lab.", "discoveryVersion": "v1", "documentationLink": "https://firebase.google.com/docs/test-lab/", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/OOjgT8IY-Au1VG1YGOqumfltLCM\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/2jJRzbiUma3M3ST-TMvZSfvHtoo\"", "icons": { "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png", "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png" @@ -1161,7 +1161,7 @@ } } }, - "revision": "20180305", + "revision": "20180322", "rootUrl": "https://www.googleapis.com/", "schemas": { "AndroidAppInfo": { @@ -2289,7 +2289,9 @@ "enum": [ "anr", "compatibleWithOrchestrator", + "completeRoboScriptExecution", "fatalException", + "incompleteRoboScriptExecution", "launcherActivityNotFound", "nativeCrash", "startActivityNotFound", @@ -2304,6 +2306,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -2388,7 +2392,7 @@ "type": "object" }, "Timestamp": { - "description": "A Timestamp represents a point in time independent of any time zone or calendar, represented as seconds and fractions of seconds at nanosecond resolution in UTC Epoch time. It is encoded using the Proleptic Gregorian Calendar which extends the Gregorian calendar backwards to year one. It is encoded assuming all minutes are 60 seconds long, i.e. leap seconds are \"smeared\" so that no leap second table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By restricting to that range, we ensure that we can convert to and from RFC 3339 date strings. 
See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).\n\n# Examples\n\nExample 1: Compute Timestamp from POSIX `time()`.\n\nTimestamp timestamp; timestamp.set_seconds(time(NULL)); timestamp.set_nanos(0);\n\nExample 2: Compute Timestamp from POSIX `gettimeofday()`.\n\nstruct timeval tv; gettimeofday(\u0026tv, NULL);\n\nTimestamp timestamp; timestamp.set_seconds(tv.tv_sec); timestamp.set_nanos(tv.tv_usec * 1000);\n\nExample 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n\nFILETIME ft; GetSystemTimeAsFileTime(\u0026ft); UINT64 ticks = (((UINT64)ft.dwHighDateTime) \u003c\u003c 32) | ft.dwLowDateTime;\n\n// A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. Timestamp timestamp; timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n\nExample 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n\nlong millis = System.currentTimeMillis();\n\nTimestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) .setNanos((int) ((millis % 1000) * 1000000)).build();\n\n\n\nExample 5: Compute Timestamp from current time in Python.\n\ntimestamp = Timestamp() timestamp.GetCurrentTime()\n\n# JSON Mapping\n\nIn JSON format, the Timestamp type is encoded as a string in the [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\" where {year} is always expressed using four digits while {month}, {day}, {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), are optional. 
The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone is required, though only UTC (as indicated by \"Z\") is currently supported.\n\nFor example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past 01:30 UTC on January 15, 2017.\n\nIn JavaScript, one can convert a Date object to this format using the standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] method. In Python, a standard `datetime.datetime` object can be converted to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- ) to obtain a formatter capable of generating timestamps in this format.", + "description": "A Timestamp represents a point in time independent of any time zone or calendar, represented as seconds and fractions of seconds at nanosecond resolution in UTC Epoch time. It is encoded using the Proleptic Gregorian Calendar which extends the Gregorian calendar backwards to year one. It is encoded assuming all minutes are 60 seconds long, i.e. leap seconds are \"smeared\" so that no leap second table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By restricting to that range, we ensure that we can convert to and from RFC 3339 date strings. 
See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).\n\n# Examples\n\nExample 1: Compute Timestamp from POSIX `time()`.\n\nTimestamp timestamp; timestamp.set_seconds(time(NULL)); timestamp.set_nanos(0);\n\nExample 2: Compute Timestamp from POSIX `gettimeofday()`.\n\nstruct timeval tv; gettimeofday(\u0026tv, NULL);\n\nTimestamp timestamp; timestamp.set_seconds(tv.tv_sec); timestamp.set_nanos(tv.tv_usec * 1000);\n\nExample 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n\nFILETIME ft; GetSystemTimeAsFileTime(\u0026ft); UINT64 ticks = (((UINT64)ft.dwHighDateTime) \u003c\u003c 32) | ft.dwLowDateTime;\n\n// A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. Timestamp timestamp; timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n\nExample 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n\nlong millis = System.currentTimeMillis();\n\nTimestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) .setNanos((int) ((millis % 1000) * 1000000)).build();\n\n\n\nExample 5: Compute Timestamp from current time in Python.\n\ntimestamp = Timestamp() timestamp.GetCurrentTime()\n\n# JSON Mapping\n\nIn JSON format, the Timestamp type is encoded as a string in the [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\" where {year} is always expressed using four digits while {month}, {day}, {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), are optional. The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone is required. 
A proto3 JSON serializer should always use UTC (as indicated by \"Z\") when printing the Timestamp type and a proto3 JSON parser should be able to accept both UTC and other timezones (as indicated by an offset).\n\nFor example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past 01:30 UTC on January 15, 2017.\n\nIn JavaScript, one can convert a Date object to this format using the standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] method. In Python, a standard `datetime.datetime` object can be converted to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- ) to obtain a formatter capable of generating timestamps in this format.", "id": "Timestamp", "properties": { "nanos": { diff --git a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go index 817faf435..a9a2bfb84 100644 --- a/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go +++ b/vendor/google.golang.org/api/toolresults/v1beta3/toolresults-gen.go @@ -2527,7 +2527,9 @@ type TestIssue struct { // Possible values: // "anr" // "compatibleWithOrchestrator" + // "completeRoboScriptExecution" // "fatalException" + // "incompleteRoboScriptExecution" // "launcherActivityNotFound" // "nativeCrash" // "startActivityNotFound" @@ -2775,8 +2777,10 @@ func (s *Thumbnail) MarshalJSON() ([]byte, error) { // {day}, {hour}, {min}, and {sec} are zero-padded to two digits each. // The fractional seconds, which can go up to 9 digits (i.e. up to 1 // nanosecond resolution), are optional. 
The "Z" suffix indicates the -// timezone ("UTC"); the timezone is required, though only UTC (as -// indicated by "Z") is currently supported. +// timezone ("UTC"); the timezone is required. A proto3 JSON serializer +// should always use UTC (as indicated by "Z") when printing the +// Timestamp type and a proto3 JSON parser should be able to accept both +// UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. diff --git a/vendor/google.golang.org/api/tpu/v1alpha1/tpu-api.json b/vendor/google.golang.org/api/tpu/v1alpha1/tpu-api.json index 990cd1594..65b9b8795 100644 --- a/vendor/google.golang.org/api/tpu/v1alpha1/tpu-api.json +++ b/vendor/google.golang.org/api/tpu/v1alpha1/tpu-api.json @@ -9,7 +9,7 @@ } }, "basePath": "", - "baseUrl": "https://tpu.googleapis.com/", + "baseUrl": "https://content-tpu.googleapis.com/", "batchPath": "batch", "canonicalName": "TPU", "description": "TPU API provides customers with access to Google TPU technology.", @@ -187,6 +187,81 @@ } }, "resources": { + "acceleratorTypes": { + "methods": { + "get": { + "description": "Gets AcceleratorType.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/acceleratorTypes/{acceleratorTypesId}", + "httpMethod": "GET", + "id": "tpu.projects.locations.acceleratorTypes.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The resource name.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+/acceleratorTypes/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+name}", + "response": { + "$ref": "AcceleratorType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + }, + "list": { + "description": "Lists accelerator types supported by this API.", + "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/acceleratorTypes", + "httpMethod": "GET", + "id": 
"tpu.projects.locations.acceleratorTypes.list", + "parameterOrder": [ + "parent" + ], + "parameters": { + "filter": { + "description": "List filter.", + "location": "query", + "type": "string" + }, + "orderBy": { + "description": "Sort results.", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "The maximum number of items to return.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "pageToken": { + "description": "The next_page_token value returned from a previous List request, if any.", + "location": "query", + "type": "string" + }, + "parent": { + "description": "The parent resource name.", + "location": "path", + "pattern": "^projects/[^/]+/locations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1alpha1/{+parent}/acceleratorTypes", + "response": { + "$ref": "ListAcceleratorTypesResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + }, "nodes": { "methods": { "create": { @@ -622,15 +697,48 @@ } } }, - "revision": "20180202", - "rootUrl": "https://tpu.googleapis.com/", + "revision": "20180303", + "rootUrl": "https://content-tpu.googleapis.com/", "schemas": { + "AcceleratorType": { + "description": "A accelerator type that a Node can be configured with.", + "id": "AcceleratorType", + "properties": { + "name": { + "description": "The resource name.", + "type": "string" + }, + "type": { + "description": "the accelerator type.", + "type": "string" + } + }, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. 
For instance:\n\n service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", "properties": {}, "type": "object" }, + "ListAcceleratorTypesResponse": { + "description": "Response for ListAcceleratorTypes.", + "id": "ListAcceleratorTypesResponse", + "properties": { + "acceleratorTypes": { + "description": "The listed nodes.", + "items": { + "$ref": "AcceleratorType" + }, + "type": "array" + }, + "nextPageToken": { + "description": "The next page token or empty if none.", + "type": "string" + } + }, + "type": "object" + }, "ListLocationsResponse": { "description": "The response message for Locations.ListLocations.", "id": "ListLocationsResponse", @@ -707,6 +815,10 @@ "description": "A resource that represents Google Cloud Platform location.", "id": "Location", "properties": { + "displayName": { + "description": "The friendly name for this location, typically a nearby city name.\nFor example, \"Tokyo\".", + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/tpu/v1alpha1/tpu-gen.go b/vendor/google.golang.org/api/tpu/v1alpha1/tpu-gen.go index 47d1d23f0..f65c55a6a 100644 --- a/vendor/google.golang.org/api/tpu/v1alpha1/tpu-gen.go +++ b/vendor/google.golang.org/api/tpu/v1alpha1/tpu-gen.go @@ -43,7 +43,7 @@ var _ = ctxhttp.Do const apiId = "tpu:v1alpha1" const apiName = "tpu" const apiVersion = "v1alpha1" -const basePath = "https://tpu.googleapis.com/" +const basePath = "https://content-tpu.googleapis.com/" // OAuth2 scopes used by this API. 
const ( @@ -89,6 +89,7 @@ type ProjectsService struct { func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { rs := &ProjectsLocationsService{s: s} + rs.AcceleratorTypes = NewProjectsLocationsAcceleratorTypesService(s) rs.Nodes = NewProjectsLocationsNodesService(s) rs.Operations = NewProjectsLocationsOperationsService(s) rs.TensorflowVersions = NewProjectsLocationsTensorflowVersionsService(s) @@ -98,6 +99,8 @@ func NewProjectsLocationsService(s *Service) *ProjectsLocationsService { type ProjectsLocationsService struct { s *Service + AcceleratorTypes *ProjectsLocationsAcceleratorTypesService + Nodes *ProjectsLocationsNodesService Operations *ProjectsLocationsOperationsService @@ -105,6 +108,15 @@ type ProjectsLocationsService struct { TensorflowVersions *ProjectsLocationsTensorflowVersionsService } +func NewProjectsLocationsAcceleratorTypesService(s *Service) *ProjectsLocationsAcceleratorTypesService { + rs := &ProjectsLocationsAcceleratorTypesService{s: s} + return rs +} + +type ProjectsLocationsAcceleratorTypesService struct { + s *Service +} + func NewProjectsLocationsNodesService(s *Service) *ProjectsLocationsNodesService { rs := &ProjectsLocationsNodesService{s: s} return rs @@ -132,6 +144,42 @@ type ProjectsLocationsTensorflowVersionsService struct { s *Service } +// AcceleratorType: A accelerator type that a Node can be configured +// with. +type AcceleratorType struct { + // Name: The resource name. + Name string `json:"name,omitempty"` + + // Type: the accelerator type. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AcceleratorType) MarshalJSON() ([]byte, error) { + type NoMethod AcceleratorType + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Empty: A generic empty message that you can re-use to avoid defining // duplicated // empty messages in your APIs. A typical example is to use it as the @@ -150,6 +198,42 @@ type Empty struct { googleapi.ServerResponse `json:"-"` } +// ListAcceleratorTypesResponse: Response for ListAcceleratorTypes. +type ListAcceleratorTypesResponse struct { + // AcceleratorTypes: The listed nodes. + AcceleratorTypes []*AcceleratorType `json:"acceleratorTypes,omitempty"` + + // NextPageToken: The next page token or empty if none. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AcceleratorTypes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. 
This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AcceleratorTypes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ListAcceleratorTypesResponse) MarshalJSON() ([]byte, error) { + type NoMethod ListAcceleratorTypesResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ListLocationsResponse: The response message for // Locations.ListLocations. type ListLocationsResponse struct { @@ -296,6 +380,11 @@ func (s *ListTensorFlowVersionsResponse) MarshalJSON() ([]byte, error) { // Location: A resource that represents Google Cloud Platform location. type Location struct { + // DisplayName: The friendly name for this location, typically a nearby + // city name. + // For example, "Tokyo". + DisplayName string `json:"displayName,omitempty"` + // Labels: Cross-service attributes for the location. For example // // {"cloud.googleapis.com/region": "us-east1"} @@ -319,7 +408,7 @@ type Location struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Labels") to + // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -327,10 +416,10 @@ type Location struct { // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Labels") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -1180,6 +1269,352 @@ func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocat } } +// method id "tpu.projects.locations.acceleratorTypes.get": + +type ProjectsLocationsAcceleratorTypesGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets AcceleratorType. +func (r *ProjectsLocationsAcceleratorTypesService) Get(name string) *ProjectsLocationsAcceleratorTypesGetCall { + c := &ProjectsLocationsAcceleratorTypesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsAcceleratorTypesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsAcceleratorTypesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsAcceleratorTypesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsAcceleratorTypesGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsAcceleratorTypesGetCall) Context(ctx context.Context) *ProjectsLocationsAcceleratorTypesGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsAcceleratorTypesGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAcceleratorTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "tpu.projects.locations.acceleratorTypes.get" call. +// Exactly one of *AcceleratorType or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AcceleratorType.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *ProjectsLocationsAcceleratorTypesGetCall) Do(opts ...googleapi.CallOption) (*AcceleratorType, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &AcceleratorType{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets AcceleratorType.", + // "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/acceleratorTypes/{acceleratorTypesId}", + // "httpMethod": "GET", + // "id": "tpu.projects.locations.acceleratorTypes.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The resource name.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+/acceleratorTypes/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1alpha1/{+name}", + // "response": { + // "$ref": "AcceleratorType" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// method id "tpu.projects.locations.acceleratorTypes.list": + +type ProjectsLocationsAcceleratorTypesListCall struct { + s *Service + parent string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists accelerator types supported by this API. 
+func (r *ProjectsLocationsAcceleratorTypesService) List(parent string) *ProjectsLocationsAcceleratorTypesListCall { + c := &ProjectsLocationsAcceleratorTypesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.parent = parent + return c +} + +// Filter sets the optional parameter "filter": List filter. +func (c *ProjectsLocationsAcceleratorTypesListCall) Filter(filter string) *ProjectsLocationsAcceleratorTypesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sort results. +func (c *ProjectsLocationsAcceleratorTypesListCall) OrderBy(orderBy string) *ProjectsLocationsAcceleratorTypesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageSize sets the optional parameter "pageSize": The maximum number +// of items to return. +func (c *ProjectsLocationsAcceleratorTypesListCall) PageSize(pageSize int64) *ProjectsLocationsAcceleratorTypesListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": The +// next_page_token value returned from a previous List request, if any. +func (c *ProjectsLocationsAcceleratorTypesListCall) PageToken(pageToken string) *ProjectsLocationsAcceleratorTypesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsLocationsAcceleratorTypesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsAcceleratorTypesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ProjectsLocationsAcceleratorTypesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsAcceleratorTypesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsLocationsAcceleratorTypesListCall) Context(ctx context.Context) *ProjectsLocationsAcceleratorTypesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsLocationsAcceleratorTypesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsLocationsAcceleratorTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1alpha1/{+parent}/acceleratorTypes") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "parent": c.parent, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "tpu.projects.locations.acceleratorTypes.list" call. +// Exactly one of *ListAcceleratorTypesResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *ListAcceleratorTypesResponse.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsLocationsAcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*ListAcceleratorTypesResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ListAcceleratorTypesResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists accelerator types supported by this API.", + // "flatPath": "v1alpha1/projects/{projectsId}/locations/{locationsId}/acceleratorTypes", + // "httpMethod": "GET", + // "id": "tpu.projects.locations.acceleratorTypes.list", + // "parameterOrder": [ + // "parent" + // ], + // "parameters": { + // "filter": { + // "description": "List filter.", + // "location": "query", + // "type": "string" + // }, + // "orderBy": { + // "description": "Sort results.", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "The maximum number of items to return.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "pageToken": { + // "description": "The next_page_token value returned from a previous List request, if any.", + // "location": "query", + // "type": "string" + // }, + // "parent": { + // "description": "The parent resource name.", + // "location": "path", + // "pattern": "^projects/[^/]+/locations/[^/]+$", + // "required": true, + // "type": 
"string" + // } + // }, + // "path": "v1alpha1/{+parent}/acceleratorTypes", + // "response": { + // "$ref": "ListAcceleratorTypesResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ProjectsLocationsAcceleratorTypesListCall) Pages(ctx context.Context, f func(*ListAcceleratorTypesResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "tpu.projects.locations.nodes.create": type ProjectsLocationsNodesCreateCall struct { diff --git a/vendor/google.golang.org/api/transport/grpc/go18.go b/vendor/google.golang.org/api/transport/grpc/go18.go index 8ab2df15c..a4b4a9945 100644 --- a/vendor/google.golang.org/api/transport/grpc/go18.go +++ b/vendor/google.golang.org/api/transport/grpc/go18.go @@ -22,5 +22,5 @@ import ( ) func addOCStatsHandler(opts []grpc.DialOption) []grpc.DialOption { - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{NoStats: true})) + return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } diff --git a/vendor/google.golang.org/api/transport/http/go18.go b/vendor/google.golang.org/api/transport/http/go18.go index 780fd29c7..1d4bb8e7f 100644 --- a/vendor/google.golang.org/api/transport/http/go18.go +++ b/vendor/google.golang.org/api/transport/http/go18.go @@ -19,15 +19,13 @@ package http import ( "net/http" + "go.opencensus.io/exporter/stackdriver/propagation" "go.opencensus.io/plugin/ochttp" - ocgoogle "go.opencensus.io/plugin/ochttp/propagation/google" ) func addOCTransport(trans http.RoundTripper) 
http.RoundTripper { return &ochttp.Transport{ - Base: trans, - // TODO(ramonza): enable stats after census-instrumentation/opencensus-go#302 - NoStats: true, - Propagation: &ocgoogle.HTTPFormat{}, + Base: trans, + Propagation: &propagation.HTTPFormat{}, } } diff --git a/vendor/google.golang.org/api/vision/v1/vision-api.json b/vendor/google.golang.org/api/vision/v1/vision-api.json index 6166aaa38..1edface8b 100644 --- a/vendor/google.golang.org/api/vision/v1/vision-api.json +++ b/vendor/google.golang.org/api/vision/v1/vision-api.json @@ -141,6 +141,40 @@ } } }, + "locations": { + "resources": { + "operations": { + "methods": { + "get": { + "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + "flatPath": "v1/locations/{locationsId}/operations/{operationsId}", + "httpMethod": "GET", + "id": "vision.locations.operations.get", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "The name of the operation resource.", + "location": "path", + "pattern": "^locations/[^/]+/operations/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-vision" + ] + } + } + } + } + }, "operations": { "methods": { "cancel": { @@ -210,7 +244,7 @@ "name": { "description": "The name of the operation resource.", "location": "path", - "pattern": "^operations/.+$", + "pattern": "^operations/[^/]+$", "required": true, "type": "string" } @@ -269,7 +303,7 @@ } } }, - "revision": "20180221", + "revision": "20180320", "rootUrl": "https://vision.googleapis.com/", "schemas": { "AnnotateImageRequest": { @@ -912,6 +946,13 @@ "outputConfig": { "$ref": "GoogleCloudVisionV1p2beta1OutputConfig", "description": "The output location and metadata from AsyncAnnotateFileRequest." 
+ }, + "outputs": { + "description": "The full list of output files in GCS.", + "items": { + "$ref": "GoogleCloudVisionV1p2beta1GcsDestination" + }, + "type": "array" } }, "type": "object" @@ -1732,7 +1773,7 @@ } }, "servicePath": "", - "title": "Google Cloud Vision API", + "title": "Cloud Vision API", "version": "v1", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/vision/v1/vision-gen.go b/vendor/google.golang.org/api/vision/v1/vision-gen.go index ddb64cd16..1a22e24ee 100644 --- a/vendor/google.golang.org/api/vision/v1/vision-gen.go +++ b/vendor/google.golang.org/api/vision/v1/vision-gen.go @@ -1,4 +1,4 @@ -// Package vision provides access to the Google Cloud Vision API. +// Package vision provides access to the Cloud Vision API. // // See https://cloud.google.com/vision/ // @@ -60,6 +60,7 @@ func New(client *http.Client) (*Service, error) { } s := &Service{client: client, BasePath: basePath} s.Images = NewImagesService(s) + s.Locations = NewLocationsService(s) s.Operations = NewOperationsService(s) return s, nil } @@ -71,6 +72,8 @@ type Service struct { Images *ImagesService + Locations *LocationsService + Operations *OperationsService } @@ -90,6 +93,27 @@ type ImagesService struct { s *Service } +func NewLocationsService(s *Service) *LocationsService { + rs := &LocationsService{s: s} + rs.Operations = NewLocationsOperationsService(s) + return rs +} + +type LocationsService struct { + s *Service + + Operations *LocationsOperationsService +} + +func NewLocationsOperationsService(s *Service) *LocationsOperationsService { + rs := &LocationsOperationsService{s: s} + return rs +} + +type LocationsOperationsService struct { + s *Service +} + func NewOperationsService(s *Service) *OperationsService { rs := &OperationsService{s: s} return rs @@ -1292,6 +1316,9 @@ type GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse struct { // AsyncAnnotateFileRequest. 
OutputConfig *GoogleCloudVisionV1p2beta1OutputConfig `json:"outputConfig,omitempty"` + // Outputs: The full list of output files in GCS. + Outputs []*GoogleCloudVisionV1p2beta1GcsDestination `json:"outputs,omitempty"` + // ForceSendFields is a list of field names (e.g. "OutputConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -3063,6 +3090,150 @@ func (c *ImagesAnnotateCall) Do(opts ...googleapi.CallOption) (*BatchAnnotateIma } +// method id "vision.locations.operations.get": + +type LocationsOperationsGetCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. Clients can +// use this +// method to poll the operation result at intervals as recommended by +// the API +// service. +func (r *LocationsOperationsService) Get(name string) *LocationsOperationsGetCall { + c := &LocationsOperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *LocationsOperationsGetCall) Fields(s ...googleapi.Field) *LocationsOperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *LocationsOperationsGetCall) IfNoneMatch(entityTag string) *LocationsOperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *LocationsOperationsGetCall) Context(ctx context.Context) *LocationsOperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *LocationsOperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *LocationsOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "vision.locations.operations.get" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *LocationsOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.", + // "flatPath": "v1/locations/{locationsId}/operations/{operationsId}", + // "httpMethod": "GET", + // "id": "vision.locations.operations.get", + // "parameterOrder": [ + // "name" + // ], + // "parameters": { + // "name": { + // "description": "The name of the operation resource.", + // "location": "path", + // "pattern": "^locations/[^/]+/operations/[^/]+$", + // "required": true, + // "type": "string" + // } + // }, + // "path": "v1/{+name}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-vision" + // ] + // } + +} + // method id "vision.operations.cancel": type OperationsCancelCall struct { @@ -3475,7 +3646,7 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, error) // "name": { // "description": "The name of the operation resource.", // "location": "path", - // "pattern": "^operations/.+$", + // "pattern": "^operations/[^/]+$", // "required": true, // "type": "string" // } diff --git a/vendor/google.golang.org/api/vision/v1p1beta1/vision-api.json b/vendor/google.golang.org/api/vision/v1p1beta1/vision-api.json 
index cf73ad3a6..06e6948b7 100644 --- a/vendor/google.golang.org/api/vision/v1p1beta1/vision-api.json +++ b/vendor/google.golang.org/api/vision/v1p1beta1/vision-api.json @@ -142,7 +142,7 @@ } } }, - "revision": "20180221", + "revision": "20180320", "rootUrl": "https://vision.googleapis.com/", "schemas": { "Color": { @@ -1414,6 +1414,13 @@ "outputConfig": { "$ref": "GoogleCloudVisionV1p2beta1OutputConfig", "description": "The output location and metadata from AsyncAnnotateFileRequest." + }, + "outputs": { + "description": "The full list of output files in GCS.", + "items": { + "$ref": "GoogleCloudVisionV1p2beta1GcsDestination" + }, + "type": "array" } }, "type": "object" @@ -1540,7 +1547,7 @@ } }, "servicePath": "", - "title": "Google Cloud Vision API", + "title": "Cloud Vision API", "version": "v1p1beta1", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/vision/v1p1beta1/vision-gen.go b/vendor/google.golang.org/api/vision/v1p1beta1/vision-gen.go index 1ece157a1..239309241 100644 --- a/vendor/google.golang.org/api/vision/v1p1beta1/vision-gen.go +++ b/vendor/google.golang.org/api/vision/v1p1beta1/vision-gen.go @@ -1,4 +1,4 @@ -// Package vision provides access to the Google Cloud Vision API. +// Package vision provides access to the Cloud Vision API. // // See https://cloud.google.com/vision/ // @@ -2447,6 +2447,9 @@ type GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse struct { // AsyncAnnotateFileRequest. OutputConfig *GoogleCloudVisionV1p2beta1OutputConfig `json:"outputConfig,omitempty"` + // Outputs: The full list of output files in GCS. + Outputs []*GoogleCloudVisionV1p2beta1GcsDestination `json:"outputs,omitempty"` + // ForceSendFields is a list of field names (e.g. "OutputConfig") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, diff --git a/vendor/google.golang.org/api/vision/v1p2beta1/vision-api.json b/vendor/google.golang.org/api/vision/v1p2beta1/vision-api.json index 8d334d67f..4708b31b1 100644 --- a/vendor/google.golang.org/api/vision/v1p2beta1/vision-api.json +++ b/vendor/google.golang.org/api/vision/v1p2beta1/vision-api.json @@ -165,7 +165,7 @@ } } }, - "revision": "20180221", + "revision": "20180320", "rootUrl": "https://vision.googleapis.com/", "schemas": { "Color": { @@ -320,6 +320,13 @@ "outputConfig": { "$ref": "GoogleCloudVisionV1p2beta1OutputConfig", "description": "The output location and metadata from AsyncAnnotateFileRequest." + }, + "outputs": { + "description": "The full list of output files in GCS.", + "items": { + "$ref": "GoogleCloudVisionV1p2beta1GcsDestination" + }, + "type": "array" } }, "type": "object" @@ -1684,7 +1691,7 @@ } }, "servicePath": "", - "title": "Google Cloud Vision API", + "title": "Cloud Vision API", "version": "v1p2beta1", "version_module": true } \ No newline at end of file diff --git a/vendor/google.golang.org/api/vision/v1p2beta1/vision-gen.go b/vendor/google.golang.org/api/vision/v1p2beta1/vision-gen.go index 5c74ddacc..332db63dd 100644 --- a/vendor/google.golang.org/api/vision/v1p2beta1/vision-gen.go +++ b/vendor/google.golang.org/api/vision/v1p2beta1/vision-gen.go @@ -1,4 +1,4 @@ -// Package vision provides access to the Google Cloud Vision API. +// Package vision provides access to the Cloud Vision API. // // See https://cloud.google.com/vision/ // @@ -460,6 +460,9 @@ type GoogleCloudVisionV1p2beta1AsyncAnnotateFileResponse struct { // AsyncAnnotateFileRequest. OutputConfig *GoogleCloudVisionV1p2beta1OutputConfig `json:"outputConfig,omitempty"` + // Outputs: The full list of output files in GCS. + Outputs []*GoogleCloudVisionV1p2beta1GcsDestination `json:"outputs,omitempty"` + // ForceSendFields is a list of field names (e.g. "OutputConfig") to // unconditionally include in API requests. 
By default, fields with // empty values are omitted from API requests. However, any non-pointer, diff --git a/vendor/google.golang.org/api/youtube/v3/youtube-api.json b/vendor/google.golang.org/api/youtube/v3/youtube-api.json index bf203dba8..f1c8bcfc8 100644 --- a/vendor/google.golang.org/api/youtube/v3/youtube-api.json +++ b/vendor/google.golang.org/api/youtube/v3/youtube-api.json @@ -30,7 +30,7 @@ "description": "Supports core YouTube features, such as uploading videos, creating and managing playlists, searching for content, and much more.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/youtube/v3", - "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/4RE3yZ8G6hkc4zQ8l8vwLojqIsk\"", + "etag": "\"-iA1DTNe4s-I6JZXPt1t1Ypy8IU/-ZA6uS3KXkDeiPcCXgXN_f_GBAw\"", "icons": { "x16": "https://www.google.com/images/icons/product/youtube-16.png", "x32": "https://www.google.com/images/icons/product/youtube-32.png" @@ -3541,7 +3541,7 @@ } } }, - "revision": "20180205", + "revision": "20180308", "rootUrl": "https://www.googleapis.com/", "schemas": { "AccessPolicy": { @@ -7488,6 +7488,19 @@ }, "description": "This setting indicates whether the broadcast should automatically begin with an in-stream slate when you update the broadcast's status to live. 
After updating the status, you then need to send a liveCuepoints.insert request that sets the cuepoint's eventState to end to remove the in-stream slate and make your broadcast stream visible to viewers.", "type": "boolean" + }, + "stereoLayout": { + "enum": [ + "left_right", + "mono", + "top_bottom" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" } }, "type": "object" @@ -9293,6 +9306,11 @@ "description": "The id of the channel being sponsored.", "type": "string" }, + "cumulativeDurationMonths": { + "description": "The cumulative time a user has been a sponsor in months.", + "format": "int32", + "type": "integer" + }, "sponsorDetails": { "$ref": "ChannelProfileDetails", "description": "Details about the sponsor." diff --git a/vendor/google.golang.org/api/youtube/v3/youtube-gen.go b/vendor/google.golang.org/api/youtube/v3/youtube-gen.go index 49c035698..163785eab 100644 --- a/vendor/google.golang.org/api/youtube/v3/youtube-gen.go +++ b/vendor/google.golang.org/api/youtube/v3/youtube-gen.go @@ -4812,6 +4812,12 @@ type LiveBroadcastContentDetails struct { // broadcast stream visible to viewers. StartWithSlate bool `json:"startWithSlate,omitempty"` + // Possible values: + // "left_right" + // "mono" + // "top_bottom" + StereoLayout string `json:"stereoLayout,omitempty"` + // ForceSendFields is a list of field names (e.g. "BoundStreamId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -7491,6 +7497,10 @@ type SponsorSnippet struct { // ChannelId: The id of the channel being sponsored. ChannelId string `json:"channelId,omitempty"` + // CumulativeDurationMonths: The cumulative time a user has been a + // sponsor in months. + CumulativeDurationMonths int64 `json:"cumulativeDurationMonths,omitempty"` + // SponsorDetails: Details about the sponsor. 
SponsorDetails *ChannelProfileDetails `json:"sponsorDetails,omitempty"` diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go index 949064763..8b3037647 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/resources.pb.go @@ -46,6 +46,7 @@ import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" import google_api2 "google.golang.org/genproto/googleapis/api/configchange" +import _ "google.golang.org/genproto/googleapis/api/metric" import _ "google.golang.org/genproto/googleapis/api/serviceconfig" import _ "google.golang.org/genproto/googleapis/longrunning" import _ "github.com/golang/protobuf/ptypes/any" @@ -65,21 +66,22 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Code describes the status of one operation step. +// Code describes the status of the operation (or one of its steps). type OperationMetadata_Status int32 const ( // Unspecifed code. OperationMetadata_STATUS_UNSPECIFIED OperationMetadata_Status = 0 - // The step has completed without errors. + // The operation or step has completed without errors. OperationMetadata_DONE OperationMetadata_Status = 1 - // The step has not started yet. + // The operation or step has not started yet. OperationMetadata_NOT_STARTED OperationMetadata_Status = 2 - // The step is in progress. + // The operation or step is in progress. OperationMetadata_IN_PROGRESS OperationMetadata_Status = 3 - // The step has completed with errors. + // The operation or step has completed with errors. If the operation is + // rollbackable, the rollback completed with errors too. 
OperationMetadata_FAILED OperationMetadata_Status = 4 - // The step has completed with cancellation. + // The operation or step has completed with cancellation. OperationMetadata_CANCELLED OperationMetadata_Status = 5 ) @@ -148,6 +150,12 @@ const ( // // $protoc --include_imports --include_source_info test.proto -o out.pb ConfigFile_FILE_DESCRIPTOR_SET_PROTO ConfigFile_FileType = 4 + // Uncompiled Proto file. Used for storage and display purposes only, + // currently server-side compilation is not supported. Should match the + // inputs to 'protoc' command used to generated FILE_DESCRIPTOR_SET_PROTO. A + // file of this type can only be included if at least one file of type + // FILE_DESCRIPTOR_SET_PROTO is included. + ConfigFile_PROTO_FILE ConfigFile_FileType = 6 ) var ConfigFile_FileType_name = map[int32]string{ @@ -156,6 +164,7 @@ var ConfigFile_FileType_name = map[int32]string{ 2: "OPEN_API_JSON", 3: "OPEN_API_YAML", 4: "FILE_DESCRIPTOR_SET_PROTO", + 6: "PROTO_FILE", } var ConfigFile_FileType_value = map[string]int32{ "FILE_TYPE_UNSPECIFIED": 0, @@ -163,6 +172,7 @@ var ConfigFile_FileType_value = map[string]int32{ "OPEN_API_JSON": 2, "OPEN_API_YAML": 3, "FILE_DESCRIPTOR_SET_PROTO": 4, + "PROTO_FILE": 6, } func (x ConfigFile_FileType) String() string { @@ -183,10 +193,13 @@ const ( // The Rollout has been cancelled. This can happen if you have overlapping // Rollout pushes, and the previous ones will be cancelled. Rollout_CANCELLED Rollout_RolloutStatus = 3 - // The Rollout has failed. It is typically caused by configuration errors. + // The Rollout has failed and the rollback attempt has failed too. Rollout_FAILED Rollout_RolloutStatus = 4 // The Rollout has not started yet and is pending for execution. Rollout_PENDING Rollout_RolloutStatus = 5 + // The Rollout has failed and rolled back to the previous successful + // Rollout. 
+ Rollout_FAILED_ROLLED_BACK Rollout_RolloutStatus = 6 ) var Rollout_RolloutStatus_name = map[int32]string{ @@ -196,6 +209,7 @@ var Rollout_RolloutStatus_name = map[int32]string{ 3: "CANCELLED", 4: "FAILED", 5: "PENDING", + 6: "FAILED_ROLLED_BACK", } var Rollout_RolloutStatus_value = map[string]int32{ "ROLLOUT_STATUS_UNSPECIFIED": 0, @@ -204,6 +218,7 @@ var Rollout_RolloutStatus_value = map[string]int32{ "CANCELLED": 3, "FAILED": 4, "PENDING": 5, + "FAILED_ROLLED_BACK": 6, } func (x Rollout_RolloutStatus) String() string { @@ -642,9 +657,10 @@ func _Rollout_OneofSizer(msg proto.Message) (n int) { return n } -// Strategy that specifies how Google Service Control should select -// different -// versions of service configurations based on traffic percentage. +// Strategy that specifies how clients of Google Service Controller want to +// send traffic to use different config versions. This is generally +// used by API proxy to split traffic based on your configured precentage for +// each config version. 
// // One example of how to gradually rollout a new service configuration using // this @@ -725,78 +741,81 @@ func init() { func init() { proto.RegisterFile("google/api/servicemanagement/v1/resources.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1166 bytes of a gzipped FileDescriptorProto + // 1201 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x8e, 0xdb, 0x44, - 0x10, 0xaf, 0xf3, 0xe7, 0xee, 0x32, 0xb9, 0x0b, 0xee, 0x96, 0xf6, 0xd2, 0xd0, 0x3f, 0xc1, 0x15, - 0xd2, 0x49, 0x48, 0x0e, 0x0d, 0x08, 0x28, 0x95, 0x5a, 0xe5, 0x12, 0xdf, 0x61, 0xc8, 0xd9, 0xee, - 0xda, 0x07, 0x2a, 0x5f, 0xac, 0xad, 0xbd, 0x71, 0x4d, 0x13, 0xdb, 0xb2, 0x37, 0x27, 0x45, 0xfd, - 0xc8, 0x0b, 0xf0, 0x0c, 0x7c, 0x81, 0x47, 0xe1, 0x03, 0x4f, 0x00, 0x2f, 0x83, 0xbc, 0x5e, 0xdf, - 0xe5, 0xcf, 0xa1, 0x14, 0xf8, 0x92, 0xec, 0xfc, 0x66, 0xf6, 0x37, 0xb3, 0xb3, 0x33, 0xb3, 0x86, - 0x5e, 0x10, 0xc7, 0xc1, 0x94, 0xf6, 0x48, 0x12, 0xf6, 0x32, 0x9a, 0x5e, 0x84, 0x1e, 0x9d, 0x91, - 0x88, 0x04, 0x74, 0x46, 0x23, 0xd6, 0xbb, 0x78, 0xdc, 0x4b, 0x69, 0x16, 0xcf, 0x53, 0x8f, 0x66, - 0x6a, 0x92, 0xc6, 0x2c, 0x46, 0x0f, 0x8b, 0x0d, 0x2a, 0x49, 0x42, 0x75, 0x63, 0x83, 0x7a, 0xf1, - 0xb8, 0x73, 0x6f, 0x89, 0x91, 0x44, 0x51, 0xcc, 0x08, 0x0b, 0xe3, 0x48, 0x6c, 0xef, 0x3c, 0x58, - 0xd2, 0x7a, 0x71, 0x34, 0x09, 0x03, 0xd7, 0x7b, 0x4d, 0xa2, 0x80, 0x0a, 0x7d, 0x7b, 0x33, 0x1e, - 0xa1, 0x79, 0x24, 0x34, 0xd3, 0x38, 0x0a, 0xd2, 0x79, 0x14, 0x85, 0x51, 0xd0, 0x8b, 0x13, 0x9a, - 0xae, 0xd0, 0xdf, 0x15, 0x46, 0x5c, 0x7a, 0x35, 0x9f, 0xf4, 0x48, 0xb4, 0x10, 0xaa, 0xee, 0xba, - 0x6a, 0x12, 0xd2, 0xa9, 0xef, 0xce, 0x48, 0xf6, 0x46, 0x58, 0xdc, 0x5b, 0xb7, 0xc8, 0x58, 0x3a, - 0xf7, 0x98, 0xd0, 0x3e, 0x5c, 0xd7, 0xb2, 0x70, 0x46, 0x33, 0x46, 0x66, 0x89, 0x30, 0x38, 0x14, - 0x06, 0x69, 0xe2, 0xf5, 0x32, 0x46, 0xd8, 0x5c, 0x04, 0xa5, 0x78, 0xd0, 0x3a, 0xe3, 0x29, 0xf2, - 0xed, 0xe2, 0x44, 0xe8, 0x43, 0xd8, 0x17, 0x87, 0x73, 0x23, 0x32, 0xa3, 
0xed, 0x4a, 0x57, 0x3a, - 0x6a, 0xe0, 0xa6, 0xc0, 0x0c, 0x32, 0xa3, 0x48, 0x85, 0x5b, 0x49, 0x1a, 0xfb, 0x73, 0x8f, 0xa6, - 0x6e, 0x92, 0xc6, 0x3f, 0x52, 0x8f, 0xb9, 0xa1, 0xdf, 0xae, 0x72, 0xcb, 0x9b, 0xa5, 0xca, 0x2a, - 0x34, 0xba, 0xaf, 0xfc, 0x55, 0x85, 0x9b, 0x66, 0x99, 0x8e, 0x33, 0xca, 0x88, 0x4f, 0x18, 0x41, - 0x1f, 0x41, 0xab, 0xbc, 0x40, 0xee, 0x29, 0x6b, 0x4b, 0xdd, 0xea, 0x51, 0x03, 0x1f, 0x94, 0x68, - 0xee, 0x2b, 0x43, 0x67, 0x50, 0xcf, 0x18, 0x4d, 0xb2, 0x76, 0xa5, 0x5b, 0x3d, 0x6a, 0xf6, 0xbf, - 0x50, 0xb7, 0x5c, 0xb2, 0xba, 0xe1, 0x49, 0xb5, 0x19, 0x4d, 0x70, 0xc1, 0x82, 0x7a, 0x3c, 0xf6, - 0x20, 0xa5, 0x59, 0xe6, 0x26, 0x34, 0xf5, 0x68, 0xc4, 0x48, 0x40, 0x79, 0xec, 0x75, 0x8c, 0x4a, - 0x95, 0x75, 0xa9, 0x41, 0x4f, 0x00, 0x32, 0x46, 0x52, 0xe6, 0xe6, 0x39, 0x6d, 0xd7, 0xba, 0xd2, - 0x51, 0xb3, 0xdf, 0x29, 0x83, 0x28, 0x13, 0xae, 0x3a, 0x65, 0xc2, 0x71, 0x83, 0x5b, 0xe7, 0x72, - 0xe7, 0x2d, 0xd4, 0x72, 0xd7, 0xa8, 0x0b, 0x4d, 0x9f, 0x66, 0x5e, 0x1a, 0x26, 0x79, 0x58, 0x65, - 0x46, 0x97, 0x20, 0xf4, 0x02, 0x76, 0x8a, 0x6b, 0xe1, 0x0e, 0x5a, 0xfd, 0x27, 0xff, 0xe9, 0x94, - 0x39, 0x01, 0x16, 0x44, 0x4a, 0x00, 0x3b, 0x05, 0x82, 0xee, 0x00, 0xb2, 0x9d, 0x81, 0x73, 0x6e, - 0xbb, 0xe7, 0x86, 0x6d, 0x69, 0x43, 0xfd, 0x44, 0xd7, 0x46, 0xf2, 0x0d, 0xb4, 0x07, 0xb5, 0x91, - 0x69, 0x68, 0xb2, 0x84, 0xde, 0x83, 0xa6, 0x61, 0x3a, 0xae, 0xed, 0x0c, 0xb0, 0xa3, 0x8d, 0xe4, - 0x4a, 0x0e, 0xe8, 0x86, 0x6b, 0x61, 0xf3, 0x14, 0x6b, 0xb6, 0x2d, 0x57, 0x11, 0xc0, 0xce, 0xc9, - 0x40, 0x1f, 0x6b, 0x23, 0xb9, 0x86, 0x0e, 0xa0, 0x31, 0x1c, 0x18, 0x43, 0x6d, 0x9c, 0x8b, 0x75, - 0xe5, 0x37, 0x09, 0x60, 0x14, 0x92, 0x20, 0x8a, 0x33, 0x16, 0x7a, 0xa8, 0x03, 0x7b, 0xd3, 0xd8, - 0xe3, 0xa1, 0xb5, 0x25, 0x7e, 0xd2, 0x4b, 0x19, 0x8d, 0xa0, 0xf6, 0x26, 0x8c, 0x7c, 0x9e, 0x81, - 0x56, 0xff, 0x93, 0xad, 0x87, 0xbc, 0xa2, 0x55, 0xbf, 0x0d, 0x23, 0x1f, 0xf3, 0xdd, 0xa8, 0x0d, - 0xbb, 0x33, 0x9a, 0x65, 0xe5, 0xb5, 0x35, 0x70, 0x29, 0x2a, 0x0f, 0xa0, 0x96, 0xdb, 0xa1, 0x26, - 0xec, 0x7e, 
0x3f, 0xc0, 0x86, 0x6e, 0x9c, 0xca, 0x37, 0x50, 0x03, 0xea, 0x1a, 0xc6, 0x26, 0x96, - 0x25, 0x85, 0xc0, 0xfe, 0x90, 0x37, 0xb6, 0xcd, 0x0b, 0x0c, 0xb5, 0xa0, 0x12, 0xfa, 0xed, 0x3a, - 0x27, 0xa9, 0x84, 0x3e, 0x1a, 0x40, 0x7d, 0x12, 0x4e, 0x69, 0x59, 0x6b, 0x1f, 0x6f, 0x0d, 0xb0, - 0x60, 0x3b, 0x09, 0xa7, 0x14, 0x17, 0x3b, 0x95, 0x5f, 0x2b, 0x00, 0x57, 0x28, 0xfa, 0x00, 0x1a, - 0x39, 0xee, 0x26, 0x84, 0xbd, 0x2e, 0xd3, 0x91, 0x03, 0x16, 0x61, 0xaf, 0xd1, 0x23, 0x38, 0xe0, - 0x4a, 0x2f, 0x8e, 0x18, 0x8d, 0x58, 0xc6, 0x8f, 0xb3, 0x8f, 0xf7, 0x73, 0x70, 0x28, 0x30, 0xf4, - 0x42, 0x30, 0xb0, 0x45, 0x42, 0x45, 0x75, 0x7c, 0xf6, 0x2f, 0xe2, 0x52, 0xf3, 0x1f, 0x67, 0x91, - 0xd0, 0xc2, 0x6f, 0xbe, 0x52, 0x7e, 0x92, 0x60, 0xaf, 0x84, 0xd1, 0x5d, 0xb8, 0x7d, 0xa2, 0x8f, - 0x35, 0xd7, 0x79, 0x69, 0x69, 0x6b, 0x05, 0x72, 0x08, 0xb7, 0x6c, 0x0d, 0x7f, 0xa7, 0x0f, 0x35, - 0x77, 0x68, 0x1a, 0x27, 0xfa, 0xa9, 0xfb, 0x72, 0x70, 0x36, 0x96, 0x25, 0x74, 0x13, 0x0e, 0x4c, - 0x4b, 0x33, 0xdc, 0x81, 0xa5, 0xbb, 0xdf, 0xd8, 0xa6, 0x21, 0x57, 0x56, 0x20, 0x6e, 0x55, 0x45, - 0xf7, 0xe1, 0x2e, 0x67, 0x1e, 0x69, 0xf6, 0x10, 0xeb, 0x96, 0x63, 0x62, 0xd7, 0xd6, 0x9c, 0xbc, - 0xaa, 0x1c, 0x53, 0xae, 0x29, 0x0f, 0xa1, 0x51, 0x84, 0x89, 0xe9, 0x04, 0x21, 0xa8, 0xf1, 0x69, - 0x53, 0xa4, 0x88, 0xaf, 0x15, 0x13, 0xf6, 0x87, 0x7c, 0xfe, 0x62, 0x9a, 0xc4, 0x29, 0x43, 0xcf, - 0xa1, 0xb5, 0x32, 0x96, 0x8b, 0x81, 0xd1, 0xec, 0xb7, 0x97, 0xd3, 0x51, 0x50, 0x8a, 0x7d, 0x07, - 0xde, 0x92, 0x94, 0x29, 0x7f, 0xee, 0xc0, 0x2e, 0x8e, 0xa7, 0xd3, 0x78, 0xce, 0xd0, 0x7d, 0x80, - 0xb4, 0x58, 0xe6, 0xa3, 0xab, 0x70, 0xdb, 0x10, 0x88, 0xee, 0xa3, 0xa7, 0xd0, 0xf4, 0x52, 0x4a, - 0x18, 0x2d, 0xda, 0xbe, 0xb2, 0xb5, 0xed, 0xa1, 0x30, 0xcf, 0x81, 0x9c, 0xbb, 0x90, 0x7c, 0xf7, - 0xd5, 0x42, 0xd4, 0x68, 0x43, 0x20, 0xc7, 0x0b, 0x64, 0xac, 0x35, 0xfb, 0xe7, 0x5b, 0xaf, 0x53, - 0x04, 0x5d, 0xfe, 0xaf, 0x76, 0x3a, 0x7a, 0x0b, 0x6d, 0x96, 0x92, 0xc9, 0x24, 0xf4, 0xca, 0x89, - 0xe6, 0x66, 0x2c, 0x25, 0x8c, 0x06, 0x0b, 0x5e, 
0xdb, 0xcd, 0xfe, 0xf3, 0x77, 0xf6, 0xe0, 0x14, - 0x44, 0x62, 0xfe, 0xd9, 0x82, 0xe6, 0xeb, 0x1b, 0xf8, 0x0e, 0xbb, 0x56, 0x83, 0x16, 0x70, 0xe8, - 0xd3, 0x29, 0x65, 0xd4, 0x2d, 0x5f, 0x8d, 0x4b, 0xdf, 0xbf, 0x4b, 0xdc, 0xf9, 0xb3, 0x77, 0x76, - 0x3e, 0xe2, 0x44, 0xe2, 0x21, 0x5a, 0xf2, 0x7d, 0xdb, 0xbf, 0x4e, 0xb1, 0xf1, 0x52, 0xed, 0x6d, - 0xbc, 0x54, 0x9d, 0x3f, 0x24, 0xb8, 0x73, 0xfd, 0x91, 0x50, 0x0a, 0xcd, 0xab, 0xf9, 0x5f, 0x96, - 0x92, 0xf5, 0x3f, 0x13, 0xa5, 0x5e, 0x3d, 0x1c, 0x99, 0x16, 0xb1, 0x74, 0x81, 0x97, 0x9d, 0x74, - 0x9e, 0x81, 0xbc, 0x6e, 0x80, 0x64, 0xa8, 0xbe, 0xa1, 0x0b, 0x51, 0x81, 0xf9, 0x12, 0xbd, 0x0f, - 0xf5, 0x0b, 0x32, 0x9d, 0x17, 0x55, 0x27, 0xe1, 0x42, 0xf8, 0xaa, 0xf2, 0xa5, 0xd4, 0x39, 0x84, - 0xdb, 0xd7, 0xe6, 0x48, 0x99, 0xc3, 0xc1, 0x4a, 0x6d, 0xa0, 0x07, 0xd0, 0xc1, 0xe6, 0x78, 0x6c, - 0x9e, 0xf3, 0xa9, 0xbe, 0x39, 0xfb, 0xd7, 0x06, 0xbc, 0x94, 0x8f, 0x4c, 0xfb, 0x7c, 0x38, 0xcc, - 0x85, 0xca, 0xea, 0x84, 0x5f, 0x1d, 0xfe, 0x4d, 0xd8, 0xb5, 0x34, 0x63, 0x94, 0x8f, 0xd6, 0xfa, - 0x31, 0xc0, 0x5e, 0x79, 0xdb, 0xc7, 0x3f, 0x4b, 0xf0, 0xc8, 0x8b, 0x67, 0xdb, 0x12, 0x78, 0xdc, - 0xc2, 0xe5, 0x57, 0x9b, 0x95, 0x77, 0x91, 0x25, 0xfd, 0x60, 0x89, 0x2d, 0x41, 0x3c, 0x25, 0x51, - 0xa0, 0xc6, 0x69, 0xd0, 0x0b, 0x68, 0xc4, 0x7b, 0x4c, 0x7c, 0x02, 0x92, 0x24, 0xcc, 0xfe, 0xf1, - 0x33, 0xf0, 0xe9, 0x06, 0xf8, 0x4b, 0xa5, 0x76, 0x3a, 0xb0, 0xcf, 0x5e, 0xed, 0x70, 0x8e, 0x4f, - 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x64, 0x4d, 0x1e, 0x49, 0x0a, 0x00, 0x00, + 0x10, 0xaf, 0xf3, 0xef, 0x2e, 0x93, 0xbb, 0xe0, 0x6e, 0x69, 0x2f, 0x0d, 0xfd, 0x73, 0xa4, 0x42, + 0x3a, 0x09, 0xc9, 0xa1, 0x07, 0x02, 0x4a, 0xa5, 0x56, 0xb9, 0xc4, 0x77, 0x84, 0xde, 0xd9, 0xee, + 0x3a, 0x07, 0x2a, 0x5f, 0xac, 0xad, 0xbd, 0x71, 0x4d, 0x13, 0xdb, 0x5a, 0x6f, 0x4e, 0x8a, 0xfa, + 0x0c, 0x48, 0x48, 0xbc, 0x01, 0x9f, 0x78, 0x03, 0x5e, 0x81, 0x0f, 0xbc, 0x01, 0x8f, 0xc1, 0x0b, + 0xa0, 0x5d, 0xaf, 0x7b, 0xf9, 0x73, 0x28, 0x05, 0xbe, 0x24, 0x3b, 0xbf, 0xdf, 0xec, 0xcc, 0xec, + 
0xec, 0xec, 0x8c, 0xa1, 0x1b, 0x26, 0x49, 0x38, 0xa1, 0x5d, 0x92, 0x46, 0xdd, 0x8c, 0xb2, 0x8b, + 0xc8, 0xa7, 0x53, 0x12, 0x93, 0x90, 0x4e, 0x69, 0xcc, 0xbb, 0x17, 0x0f, 0xbb, 0x8c, 0x66, 0xc9, + 0x8c, 0xf9, 0x34, 0x33, 0x52, 0x96, 0xf0, 0x04, 0xdd, 0xcf, 0x37, 0x18, 0x24, 0x8d, 0x8c, 0xb5, + 0x0d, 0xc6, 0xc5, 0xc3, 0xf6, 0x9d, 0x05, 0x8b, 0x24, 0x8e, 0x13, 0x4e, 0x78, 0x94, 0xc4, 0x6a, + 0x7b, 0xfb, 0xde, 0x02, 0xeb, 0x27, 0xf1, 0x38, 0x0a, 0x3d, 0xff, 0x15, 0x89, 0x43, 0xaa, 0xf8, + 0xbd, 0x05, 0x7e, 0x4a, 0x39, 0x8b, 0x7c, 0x45, 0xb4, 0xd6, 0x03, 0x55, 0xcc, 0x03, 0xc5, 0x4c, + 0x92, 0x38, 0x64, 0xb3, 0x38, 0x8e, 0xe2, 0xb0, 0x9b, 0xa4, 0x94, 0x2d, 0xf9, 0xbd, 0xad, 0x94, + 0xa4, 0xf4, 0x72, 0x36, 0xee, 0x92, 0x78, 0xae, 0xa8, 0xfd, 0x55, 0x6a, 0x1c, 0xd1, 0x49, 0xe0, + 0x4d, 0x49, 0xf6, 0x5a, 0x69, 0xdc, 0x59, 0xd5, 0xc8, 0x38, 0x9b, 0xf9, 0x5c, 0xb1, 0xf7, 0x57, + 0x59, 0x1e, 0x4d, 0x69, 0xc6, 0xc9, 0x34, 0x5d, 0x39, 0x13, 0x4b, 0xfd, 0x6e, 0xc6, 0x09, 0x9f, + 0xa9, 0xa0, 0x3a, 0x3e, 0x34, 0xcf, 0x64, 0xee, 0x02, 0x37, 0x3f, 0x11, 0xfa, 0x10, 0x76, 0xd4, + 0xe1, 0xbc, 0x98, 0x4c, 0x69, 0xab, 0xb4, 0xaf, 0x1d, 0xd4, 0x71, 0x43, 0x61, 0x16, 0x99, 0x52, + 0x64, 0xc0, 0x8d, 0x94, 0x25, 0xc1, 0xcc, 0xa7, 0xcc, 0x4b, 0x59, 0xf2, 0x03, 0xf5, 0xb9, 0x17, + 0x05, 0xad, 0xb2, 0xd4, 0xbc, 0x5e, 0x50, 0x4e, 0xce, 0x0c, 0x83, 0xce, 0x9f, 0x65, 0xb8, 0x6e, + 0x17, 0xe9, 0x38, 0xa3, 0x9c, 0x04, 0x84, 0x13, 0xf4, 0x11, 0x34, 0x8b, 0x9b, 0x95, 0x9e, 0xb2, + 0x96, 0xb6, 0x5f, 0x3e, 0xa8, 0xe3, 0xdd, 0x02, 0x15, 0xbe, 0x32, 0x74, 0x06, 0xd5, 0x8c, 0xd3, + 0x34, 0x6b, 0x95, 0xf6, 0xcb, 0x07, 0x8d, 0xc3, 0x2f, 0x8c, 0x0d, 0xb7, 0x6f, 0xac, 0x79, 0x32, + 0x5c, 0x4e, 0x53, 0x9c, 0x5b, 0x41, 0x5d, 0x19, 0x7b, 0xc8, 0x68, 0x96, 0x79, 0x29, 0x65, 0x3e, + 0x8d, 0x39, 0x09, 0xa9, 0x8c, 0xbd, 0x8a, 0x51, 0x41, 0x39, 0x6f, 0x19, 0xf4, 0x08, 0x20, 0xe3, + 0x84, 0x71, 0x4f, 0xe4, 0xb4, 0x55, 0xd9, 0xd7, 0x0e, 0x1a, 0x87, 0xed, 0x22, 0x88, 0x22, 0xe1, + 0xc6, 0xa8, 0x48, 0x38, 0xae, 0x4b, 
0x6d, 0x21, 0xb7, 0xdf, 0x40, 0x45, 0xb8, 0x46, 0xfb, 0xd0, + 0x08, 0x68, 0xe6, 0xb3, 0x28, 0x15, 0x61, 0x15, 0x19, 0x5d, 0x80, 0xd0, 0x73, 0xa8, 0xe5, 0xd7, + 0x22, 0x1d, 0x34, 0x0f, 0x1f, 0xfd, 0xa7, 0x53, 0x0a, 0x03, 0x58, 0x19, 0xea, 0x84, 0x50, 0xcb, + 0x11, 0x74, 0x0b, 0x90, 0x3b, 0xea, 0x8d, 0xce, 0x5d, 0xef, 0xdc, 0x72, 0x1d, 0xb3, 0x3f, 0x3c, + 0x1e, 0x9a, 0x03, 0xfd, 0x1a, 0xda, 0x86, 0xca, 0xc0, 0xb6, 0x4c, 0x5d, 0x43, 0xef, 0x41, 0xc3, + 0xb2, 0x47, 0x9e, 0x3b, 0xea, 0xe1, 0x91, 0x39, 0xd0, 0x4b, 0x02, 0x18, 0x5a, 0x9e, 0x83, 0xed, + 0x13, 0x6c, 0xba, 0xae, 0x5e, 0x46, 0x00, 0xb5, 0xe3, 0xde, 0xf0, 0xd4, 0x1c, 0xe8, 0x15, 0xb4, + 0x0b, 0xf5, 0x7e, 0xcf, 0xea, 0x9b, 0xa7, 0x42, 0xac, 0x76, 0x7e, 0xd5, 0x00, 0x06, 0x11, 0x09, + 0xe3, 0x24, 0xe3, 0x91, 0x8f, 0xda, 0xb0, 0x3d, 0x49, 0x7c, 0x19, 0x5a, 0x4b, 0x93, 0x27, 0x7d, + 0x2b, 0xa3, 0x01, 0x54, 0x5e, 0x47, 0x71, 0x20, 0x33, 0xd0, 0x3c, 0xfc, 0x64, 0xe3, 0x21, 0x2f, + 0xcd, 0x1a, 0xcf, 0xa2, 0x38, 0xc0, 0x72, 0x37, 0x6a, 0xc1, 0xd6, 0x94, 0x66, 0x59, 0x71, 0x6d, + 0x75, 0x5c, 0x88, 0x9d, 0x7b, 0x50, 0x11, 0x7a, 0xa8, 0x01, 0x5b, 0xdf, 0xf5, 0xb0, 0x35, 0xb4, + 0x4e, 0xf4, 0x6b, 0xa8, 0x0e, 0x55, 0x13, 0x63, 0x1b, 0xeb, 0x5a, 0x87, 0xc0, 0x4e, 0x5f, 0xbe, + 0x78, 0x57, 0x16, 0x18, 0x6a, 0x42, 0x29, 0x0a, 0x5a, 0x55, 0x69, 0xa4, 0x14, 0x05, 0xa8, 0x07, + 0xd5, 0x71, 0x34, 0xa1, 0x45, 0xad, 0x7d, 0xbc, 0x31, 0xc0, 0xdc, 0xda, 0x71, 0x34, 0xa1, 0x38, + 0xdf, 0xd9, 0xf9, 0xad, 0x04, 0x70, 0x89, 0xa2, 0x0f, 0xa0, 0x2e, 0x70, 0x2f, 0x25, 0xfc, 0x55, + 0x91, 0x0e, 0x01, 0x38, 0x84, 0xbf, 0x42, 0x0f, 0x60, 0x57, 0x92, 0x7e, 0x12, 0x73, 0x1a, 0xf3, + 0x4c, 0x1e, 0x67, 0x07, 0xef, 0x08, 0xb0, 0xaf, 0x30, 0xf4, 0x5c, 0x59, 0xe0, 0xf3, 0x94, 0xaa, + 0xea, 0xf8, 0xec, 0x5f, 0xc4, 0x65, 0x88, 0x9f, 0xd1, 0x3c, 0xa5, 0xb9, 0x5f, 0xb1, 0xea, 0xfc, + 0xac, 0xc1, 0x76, 0x01, 0xa3, 0xdb, 0x70, 0xf3, 0x78, 0x78, 0x6a, 0x7a, 0xa3, 0x17, 0x8e, 0xb9, + 0x52, 0x20, 0x7b, 0x70, 0xc3, 0x35, 0xf1, 0xb7, 0xc3, 0xbe, 0xe9, 0xf5, 
0x6d, 0xeb, 0x78, 0x78, + 0xe2, 0xbd, 0xe8, 0x9d, 0x9d, 0xea, 0x1a, 0xba, 0x0e, 0xbb, 0xb6, 0x63, 0x5a, 0x5e, 0xcf, 0x19, + 0x7a, 0xdf, 0xb8, 0xb6, 0xa5, 0x97, 0x96, 0x20, 0xa9, 0x55, 0x46, 0x77, 0xe1, 0xb6, 0xb4, 0x3c, + 0x30, 0xdd, 0x3e, 0x1e, 0x3a, 0x23, 0x1b, 0x7b, 0xae, 0x39, 0x12, 0x55, 0x35, 0xb2, 0xf5, 0x0a, + 0x6a, 0x02, 0xc8, 0xa5, 0x27, 0x94, 0xf4, 0x5a, 0xe7, 0x3e, 0xd4, 0xf3, 0xb0, 0x31, 0x1d, 0x23, + 0x04, 0x15, 0xd9, 0x7d, 0xf2, 0x94, 0xc9, 0x75, 0xc7, 0x86, 0x9d, 0xbe, 0x6c, 0xd4, 0x98, 0xa6, + 0x09, 0xe3, 0xe8, 0x29, 0x34, 0x97, 0xfa, 0x77, 0xde, 0x40, 0x1a, 0x87, 0xad, 0xc5, 0xf4, 0xe4, + 0x26, 0xd5, 0xbe, 0x5d, 0x7f, 0x41, 0xca, 0x3a, 0x7f, 0xd5, 0x60, 0x0b, 0x27, 0x93, 0x49, 0x32, + 0xe3, 0xe8, 0x2e, 0x00, 0xcb, 0x97, 0xa2, 0x95, 0xe5, 0x6e, 0xeb, 0x0a, 0x19, 0x06, 0xe8, 0x31, + 0x34, 0x7c, 0x46, 0x09, 0xa7, 0x79, 0x1b, 0x28, 0x6d, 0x6c, 0x03, 0x90, 0xab, 0x0b, 0x40, 0xd8, + 0xce, 0xa5, 0xc0, 0x7b, 0x39, 0x57, 0x35, 0x5b, 0x57, 0xc8, 0xd1, 0x1c, 0x59, 0x2b, 0x8f, 0xff, + 0xf3, 0x8d, 0xd7, 0xab, 0x82, 0x2e, 0xfe, 0x97, 0x5f, 0x3e, 0x7a, 0x03, 0x2d, 0xce, 0xc8, 0x78, + 0x1c, 0xf9, 0x45, 0x87, 0xf3, 0x32, 0xce, 0x08, 0xa7, 0xe1, 0x5c, 0xd6, 0x7a, 0xe3, 0xf0, 0xe9, + 0x3b, 0x7b, 0x18, 0xe5, 0x86, 0x54, 0x3f, 0x74, 0x95, 0x99, 0xaf, 0xaf, 0xe1, 0x5b, 0xfc, 0x4a, + 0x06, 0xcd, 0x61, 0x2f, 0xa0, 0x13, 0xca, 0xa9, 0x57, 0x4c, 0x91, 0xb7, 0xbe, 0x7f, 0xd7, 0xa4, + 0xf3, 0x27, 0xef, 0xec, 0x7c, 0x20, 0x0d, 0xa9, 0xc1, 0xb4, 0xe0, 0xfb, 0x66, 0x70, 0x15, 0xb1, + 0x36, 0xb9, 0xb6, 0xd7, 0x26, 0x57, 0xfb, 0x0f, 0x0d, 0x6e, 0x5d, 0x7d, 0x24, 0xc4, 0xa0, 0x71, + 0x39, 0x0f, 0x8a, 0x52, 0x72, 0xfe, 0x67, 0xa2, 0x8c, 0xcb, 0x41, 0x92, 0x99, 0x31, 0x67, 0x73, + 0xbc, 0xe8, 0xa4, 0xfd, 0x04, 0xf4, 0x55, 0x05, 0xa4, 0x43, 0xf9, 0x35, 0x9d, 0xab, 0x0a, 0x14, + 0x4b, 0xf4, 0x3e, 0x54, 0x2f, 0xc8, 0x64, 0x96, 0x57, 0x9d, 0x86, 0x73, 0xe1, 0xab, 0xd2, 0x97, + 0x5a, 0x7b, 0x0f, 0x6e, 0x5e, 0x99, 0xa3, 0xce, 0x8f, 0x1a, 0xec, 0x2e, 0x15, 0x07, 0xba, 0x07, + 0x6d, 0x6c, 
0x9f, 0x9e, 0xda, 0xe7, 0xb2, 0xcd, 0xaf, 0x0f, 0x83, 0x95, 0x8e, 0xaf, 0x89, 0x1e, + 0xea, 0x9e, 0xf7, 0xfb, 0x42, 0x28, 0x2d, 0xb7, 0xfc, 0xe5, 0x69, 0xd0, 0x80, 0x2d, 0xc7, 0xb4, + 0x06, 0xa2, 0xd7, 0x56, 0xc5, 0xa8, 0xc9, 0x09, 0x4f, 0x38, 0x33, 0x07, 0xde, 0x51, 0xaf, 0xff, + 0x4c, 0xaf, 0x1d, 0x01, 0x6c, 0x17, 0x65, 0x70, 0xf4, 0x93, 0x06, 0x0f, 0xfc, 0x64, 0xba, 0x29, + 0xb3, 0x47, 0x4d, 0x5c, 0x7c, 0xf7, 0x39, 0xe2, 0x79, 0x39, 0xda, 0xf7, 0x8e, 0xda, 0x12, 0x26, + 0x13, 0x12, 0x87, 0x46, 0xc2, 0xc2, 0x6e, 0x48, 0x63, 0xf9, 0xf8, 0xd4, 0x47, 0x24, 0x49, 0xa3, + 0xec, 0x1f, 0x3f, 0x24, 0x1f, 0xaf, 0x81, 0xbf, 0x94, 0x2a, 0x27, 0x3d, 0xf7, 0xec, 0x65, 0x4d, + 0xda, 0xf8, 0xf4, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb4, 0xdd, 0x92, 0xf0, 0x8b, 0x0a, 0x00, + 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go index cbd3fb2e5..b56328742 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1/servicemanager.pb.go @@ -7,7 +7,6 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" -import _ "google.golang.org/genproto/googleapis/api/serviceconfig" import google_api22 "google.golang.org/genproto/googleapis/api/serviceconfig" import google_longrunning "google.golang.org/genproto/googleapis/longrunning" import google_protobuf1 "github.com/golang/protobuf/ptypes/any" @@ -1421,94 +1420,94 @@ func init() { } var fileDescriptor1 = []byte{ - // 1420 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0x67, 0x9c, 0xb4, 0x4d, 0x5e, 0x62, 0xb7, 0x9d, 0xb8, 0x8d, 0xeb, 0x34, 0x6a, 0xba, 0x81, - 0x12, 0xa5, 0xd4, 0xab, 
0xa4, 0xff, 0xa8, 0x53, 0x0e, 0x4d, 0x02, 0x55, 0x44, 0x4b, 0x23, 0xa7, - 0x05, 0x54, 0x90, 0xac, 0xcd, 0xee, 0x64, 0xbb, 0x74, 0x3d, 0x63, 0x76, 0xd6, 0x09, 0x69, 0x94, - 0x4b, 0x55, 0x09, 0x09, 0x4e, 0xa8, 0x40, 0x25, 0x8e, 0x15, 0xe2, 0xc0, 0x81, 0x0b, 0x07, 0x24, - 0x0e, 0x48, 0x7c, 0x06, 0xc4, 0x37, 0xe0, 0x33, 0x70, 0x46, 0x9e, 0x9d, 0x75, 0x76, 0xec, 0x8d, - 0x77, 0xd7, 0x02, 0x8e, 0xfb, 0x66, 0x7e, 0xef, 0xfd, 0xe6, 0xcd, 0x7b, 0x33, 0xbf, 0x59, 0xb8, - 0x62, 0x33, 0x66, 0xbb, 0x44, 0x37, 0x9a, 0x8e, 0xce, 0x89, 0xb7, 0xed, 0x98, 0xa4, 0x61, 0x50, - 0xc3, 0x26, 0x0d, 0x42, 0x7d, 0x7d, 0x7b, 0x41, 0x35, 0x7a, 0x95, 0xa6, 0xc7, 0x7c, 0x86, 0xcf, - 0x05, 0xa8, 0x8a, 0xd1, 0x74, 0x2a, 0x3d, 0xa8, 0xca, 0xf6, 0x42, 0xf9, 0x6c, 0xc4, 0xad, 0x41, - 0x29, 0xf3, 0x0d, 0xdf, 0x61, 0x94, 0x07, 0xf0, 0xf2, 0xa9, 0xe8, 0x68, 0xcb, 0x7f, 0x24, 0xcd, - 0xa5, 0x5e, 0x2e, 0x72, 0x44, 0x4f, 0x62, 0xe9, 0x11, 0xce, 0x5a, 0x9e, 0x49, 0xc2, 0x08, 0xb3, - 0x12, 0xe0, 0x32, 0x6a, 0x7b, 0x2d, 0x4a, 0x1d, 0x6a, 0xeb, 0xac, 0x49, 0x3c, 0x85, 0xc6, 0x19, - 0x39, 0x49, 0x7c, 0x6d, 0xb6, 0xb6, 0x74, 0x83, 0xee, 0xca, 0xa1, 0x99, 0xee, 0xa1, 0x2d, 0x87, - 0xb8, 0x56, 0xbd, 0x61, 0xf0, 0xc7, 0x72, 0xc6, 0xd9, 0xee, 0x19, 0xdc, 0xf7, 0x5a, 0xa6, 0x2f, - 0x47, 0x27, 0xe5, 0xa8, 0xd7, 0x34, 0x75, 0xee, 0x1b, 0x7e, 0x4b, 0xc6, 0xd4, 0x5e, 0x22, 0x98, - 0xb8, 0xe3, 0x70, 0x7f, 0x23, 0x58, 0x05, 0xaf, 0x91, 0x4f, 0x5b, 0x84, 0xfb, 0xb8, 0x02, 0x13, - 0x4d, 0x8f, 0x59, 0x2d, 0x93, 0x78, 0xf5, 0xa6, 0xc7, 0x3e, 0x21, 0xa6, 0x5f, 0x77, 0xac, 0x12, - 0x9a, 0x41, 0x73, 0xa3, 0xb5, 0x93, 0xe1, 0xd0, 0x7a, 0x30, 0xb2, 0x66, 0xe1, 0x29, 0x18, 0x6d, - 0x1a, 0x36, 0xa9, 0x73, 0xe7, 0x09, 0x29, 0x1d, 0x99, 0x41, 0x73, 0x47, 0x6a, 0x23, 0x6d, 0xc3, - 0x86, 0xf3, 0x84, 0xe0, 0x69, 0x00, 0x31, 0xe8, 0xb3, 0xc7, 0x84, 0x96, 0x8e, 0x0a, 0x1f, 0x62, - 0xfa, 0xfd, 0xb6, 0x01, 0x9f, 0x83, 0x31, 0x93, 0x51, 0xde, 0x6a, 0x10, 0xaf, 0x1d, 0xe3, 0x98, - 0x18, 0x87, 0xd0, 0xb4, 0x66, 0x69, 0x5f, 0x22, 0x28, 0xaa, 
0x24, 0x79, 0x93, 0x51, 0x4e, 0xf0, - 0xbb, 0x30, 0x22, 0xd3, 0xcf, 0x4b, 0x68, 0x66, 0x68, 0x6e, 0x6c, 0x51, 0xaf, 0x24, 0x94, 0x42, - 0xe5, 0xae, 0xf8, 0xb2, 0xa4, 0xaf, 0x5a, 0xc7, 0x01, 0xbe, 0x00, 0xc7, 0x29, 0xf9, 0xcc, 0xaf, - 0x47, 0xa8, 0xe6, 0x04, 0x95, 0x7c, 0xdb, 0xbc, 0x1e, 0xd2, 0xd5, 0xae, 0xc1, 0xc9, 0xdb, 0x24, - 0xe4, 0x12, 0xe6, 0xeb, 0x3c, 0x8c, 0x4b, 0x47, 0x75, 0x6a, 0x34, 0x88, 0x4c, 0xd4, 0x98, 0xb4, - 0xbd, 0x67, 0x34, 0x88, 0x66, 0x40, 0x71, 0xc5, 0x23, 0x86, 0x4f, 0xba, 0xa0, 0x6b, 0x70, 0x4c, - 0x4e, 0x13, 0xa8, 0x01, 0xd6, 0x10, 0xe2, 0xb5, 0x1b, 0x50, 0x5c, 0x25, 0x2e, 0xe9, 0x09, 0x91, - 0x82, 0xdd, 0x12, 0x9c, 0x7e, 0x40, 0xad, 0x01, 0xc1, 0x16, 0x4c, 0xf6, 0x80, 0xe5, 0x16, 0xfd, - 0x8b, 0xab, 0xfb, 0x13, 0xc1, 0xe4, 0x41, 0xe6, 0x57, 0x18, 0xdd, 0x72, 0xec, 0xf4, 0x24, 0xdb, - 0x25, 0x6a, 0x0a, 0x4c, 0xbb, 0xc8, 0x82, 0x9d, 0x1d, 0x09, 0x0c, 0x6b, 0x16, 0xfe, 0x00, 0x86, - 0xb7, 0x1d, 0xb2, 0x53, 0x1a, 0x9a, 0x41, 0x73, 0x85, 0xc5, 0x95, 0x44, 0x8e, 0x87, 0xf0, 0xa8, - 0x04, 0x5f, 0xef, 0x3b, 0x64, 0xa7, 0x26, 0x1c, 0x6a, 0xe7, 0x01, 0x0e, 0x6c, 0x78, 0x14, 0x8e, - 0x2c, 0xdf, 0xda, 0x58, 0x5b, 0x39, 0xf1, 0x0a, 0x1e, 0x81, 0xe1, 0x77, 0x1e, 0xdc, 0xb9, 0x73, - 0x02, 0x69, 0x4f, 0xe0, 0x4c, 0xa4, 0xba, 0x83, 0xd9, 0x3c, 0xc3, 0xc2, 0xd4, 0xf6, 0xca, 0x75, - 0xb7, 0x97, 0xd2, 0x9a, 0x43, 0x6a, 0x6b, 0x6a, 0x4f, 0x11, 0x94, 0xe3, 0x82, 0xcb, 0xdd, 0xbb, - 0x09, 0xc7, 0xc3, 0xe8, 0x41, 0xaa, 0xc2, 0x3e, 0x9b, 0x88, 0x66, 0x28, 0xdc, 0xa9, 0x02, 0x57, - 0xbc, 0xa4, 0xee, 0xa8, 0x3d, 0x28, 0x2b, 0x9d, 0x91, 0x79, 0x6b, 0xab, 0x50, 0x50, 0x69, 0x8a, - 0x38, 0x87, 0xb0, 0xcc, 0x2b, 0x2c, 0xb5, 0x5f, 0x10, 0x9c, 0xd9, 0x68, 0x6d, 0x36, 0x1c, 0x3f, - 0x30, 0x6c, 0x88, 0x83, 0x3b, 0x43, 0xf0, 0x1a, 0xe4, 0x65, 0x5d, 0x05, 0x67, 0xbe, 0x8c, 0x7d, - 0x29, 0xb1, 0x86, 0x94, 0x78, 0xe3, 0x66, 0xe4, 0x0b, 0xcf, 0x42, 0x7e, 0xdb, 0x70, 0x1d, 0xcb, - 0xf0, 0x49, 0x9d, 0x51, 0x77, 0x57, 0xec, 0xdb, 0x48, 0x6d, 0x3c, 0x34, 0xde, 0xa3, 0xee, 0xae, - 
0xf6, 0x21, 0x94, 0xe3, 0x88, 0xcb, 0xad, 0xeb, 0xcd, 0x09, 0x4a, 0x9d, 0x93, 0x67, 0x08, 0xa6, - 0xd4, 0xb3, 0x8a, 0xb9, 0x2e, 0x6b, 0xf9, 0x19, 0xb2, 0xb2, 0x0c, 0xc7, 0xbc, 0x00, 0x24, 0xf3, - 0x31, 0x97, 0x98, 0x8f, 0x30, 0x48, 0x08, 0xd4, 0x9e, 0xab, 0xc5, 0x29, 0xc7, 0xff, 0xa7, 0xd6, - 0xc0, 0xa7, 0xe1, 0xe8, 0x96, 0xe3, 0xfa, 0xc4, 0x2b, 0x0d, 0x0b, 0x9c, 0xfc, 0x6a, 0xdf, 0x46, - 0x53, 0xb1, 0xac, 0x64, 0xe2, 0x57, 0x61, 0x44, 0x2e, 0x20, 0x6c, 0x96, 0xf4, 0x4b, 0xef, 0x20, - 0x53, 0xf7, 0xce, 0xc7, 0x50, 0x8a, 0xdc, 0x46, 0x99, 0xb7, 0x69, 0x1a, 0x40, 0x86, 0x3c, 0x38, - 0x15, 0x47, 0xa5, 0x65, 0xcd, 0xd2, 0x1e, 0x42, 0xf1, 0x6d, 0x6a, 0x6c, 0xba, 0xd9, 0xef, 0x84, - 0xee, 0x5b, 0x3d, 0xd7, 0x73, 0xab, 0x7f, 0x04, 0xa7, 0x56, 0x1d, 0xfe, 0x1f, 0x39, 0xff, 0x1c, - 0xc1, 0xd4, 0x6d, 0x42, 0xdb, 0x0a, 0xab, 0x73, 0x9c, 0x34, 0x99, 0xd7, 0x49, 0xcd, 0x65, 0x00, - 0x4a, 0x76, 0xd4, 0xce, 0x28, 0x86, 0xdb, 0x14, 0x6a, 0xa8, 0xca, 0x2d, 0xba, 0x5b, 0x1b, 0xa5, - 0x64, 0x27, 0xf0, 0xd0, 0x06, 0x31, 0xd7, 0x52, 0x8f, 0x98, 0x43, 0x40, 0xcc, 0xb5, 0x64, 0x2f, - 0xfd, 0x8d, 0xe0, 0x6c, 0x3c, 0x13, 0x59, 0x2f, 0x29, 0x96, 0x5b, 0x80, 0x5c, 0x67, 0x95, 0x39, - 0xc7, 0xc2, 0xf7, 0xa1, 0x60, 0x3e, 0x32, 0xa8, 0x4d, 0xea, 0x9e, 0xf0, 0xc5, 0x4b, 0x43, 0xa2, - 0xd0, 0x52, 0x9c, 0x39, 0x02, 0x26, 0x19, 0xe4, 0xcd, 0xc8, 0x17, 0xc7, 0x77, 0x61, 0xcc, 0x72, - 0x0c, 0x9b, 0x32, 0xee, 0x3b, 0x26, 0x2f, 0x0d, 0x0b, 0x97, 0x17, 0x13, 0x5d, 0xae, 0x76, 0x30, - 0xb5, 0x28, 0x7e, 0xf1, 0xa7, 0x09, 0x28, 0xc8, 0x9d, 0x0d, 0x6e, 0x74, 0x0f, 0x7f, 0x85, 0x60, - 0x3c, 0x2a, 0xe4, 0xf0, 0x95, 0x44, 0xef, 0x31, 0xe2, 0xb4, 0x7c, 0x35, 0x23, 0x2a, 0x48, 0xb4, - 0x56, 0x7c, 0xfa, 0xc7, 0x5f, 0xcf, 0x73, 0x05, 0x3c, 0x1e, 0x79, 0x47, 0x70, 0xfc, 0x2d, 0x02, - 0x38, 0xe8, 0x20, 0xbc, 0x98, 0xe1, 0xea, 0x0f, 0xf9, 0x64, 0x95, 0x34, 0xda, 0xac, 0x60, 0x32, - 0x8d, 0xa7, 0xa2, 0x4c, 0xf4, 0xbd, 0x68, 0x19, 0xec, 0xe3, 0x67, 0x08, 0xf2, 0xca, 0x21, 0x8c, - 0x93, 0xd7, 0x1d, 0x27, 0x30, 0xcb, 
0xd3, 0x21, 0x2c, 0xf2, 0xfa, 0xa8, 0xdc, 0x0b, 0x5f, 0x1f, - 0xda, 0xb4, 0x20, 0x33, 0xa9, 0x29, 0x69, 0xa9, 0x86, 0xaa, 0x0b, 0x7f, 0x81, 0x20, 0xaf, 0x88, - 0xca, 0x14, 0x34, 0xe2, 0x44, 0x68, 0x12, 0x0d, 0x99, 0x93, 0xf9, 0xbe, 0x39, 0x79, 0x81, 0xe0, - 0x78, 0x97, 0xd2, 0xc4, 0xd7, 0x13, 0xe9, 0xc4, 0x0b, 0xdb, 0x24, 0x42, 0x6f, 0x08, 0x42, 0x17, - 0xb4, 0x57, 0xfb, 0x10, 0xaa, 0xb6, 0xa4, 0x6b, 0xfc, 0x2b, 0x02, 0xdc, 0x2b, 0xa4, 0x70, 0x35, - 0x4b, 0xa9, 0xaa, 0xd2, 0xaf, 0xbc, 0x34, 0x10, 0x56, 0x16, 0xfb, 0x45, 0xc1, 0xfe, 0x35, 0x3c, - 0xdb, 0x87, 0xbd, 0x2e, 0x35, 0x1d, 0xfe, 0x0e, 0xc1, 0x89, 0x6e, 0x45, 0x8b, 0xdf, 0x1c, 0x54, - 0x04, 0x97, 0xe3, 0x24, 0x86, 0x76, 0x5d, 0x10, 0x5a, 0xc0, 0x7a, 0x0a, 0x42, 0xfa, 0x5e, 0x47, - 0xa9, 0xef, 0xe3, 0xef, 0x11, 0x4c, 0xc4, 0xc8, 0x43, 0xbc, 0x94, 0xad, 0x1b, 0x52, 0x50, 0x5c, - 0x12, 0x14, 0xaf, 0x6a, 0x69, 0x72, 0x56, 0xed, 0x52, 0x57, 0xf8, 0x07, 0x04, 0xb8, 0x57, 0x8e, - 0xa5, 0x28, 0x80, 0x43, 0xc5, 0x67, 0x52, 0x81, 0x5e, 0x15, 0x74, 0x75, 0x6d, 0x3e, 0x0d, 0x5d, - 0x2e, 0xa2, 0x54, 0xd1, 0x3c, 0xfe, 0x4d, 0x7d, 0xf2, 0x87, 0xfa, 0x05, 0x67, 0x2a, 0xb7, 0x2e, - 0x2d, 0x56, 0xbe, 0x39, 0x18, 0x58, 0x16, 0xab, 0x6c, 0x35, 0xdc, 0xaf, 0xd5, 0xf4, 0x8e, 0x34, - 0xfa, 0x19, 0x29, 0x2f, 0xf0, 0xc0, 0x8c, 0x6f, 0x64, 0x39, 0xb8, 0x15, 0x9d, 0x54, 0x4e, 0xad, - 0xcf, 0xb4, 0x1b, 0x82, 0xe8, 0x65, 0xbc, 0x90, 0x86, 0xa8, 0xbe, 0x77, 0x20, 0xad, 0xf6, 0xf1, - 0x8f, 0xa8, 0xfb, 0xfd, 0x2f, 0x89, 0xdf, 0xcc, 0x78, 0xaa, 0xab, 0xdc, 0x53, 0xd6, 0x48, 0xaa, - 0xcc, 0x56, 0x43, 0xe5, 0x8d, 0x7f, 0x47, 0x50, 0x8c, 0x13, 0x2d, 0x29, 0xc8, 0xf6, 0x51, 0x5d, - 0xe5, 0xb7, 0x06, 0x44, 0xab, 0x65, 0xa2, 0x9d, 0x57, 0x6e, 0x2a, 0x3b, 0x06, 0xd2, 0xae, 0xf3, - 0xaf, 0x11, 0xe4, 0x15, 0xf1, 0x9a, 0xe2, 0xe2, 0x8a, 0x13, 0xbb, 0x49, 0x29, 0xbe, 0x24, 0x58, - 0xbd, 0xae, 0x69, 0xfd, 0xee, 0x09, 0x22, 0x1c, 0xb7, 0x69, 0xbd, 0x40, 0x50, 0x50, 0x75, 0x2f, - 0xbe, 0x96, 0x42, 0x63, 0xf1, 0xec, 0xc4, 0x2a, 0x82, 0xd8, 0x5c, 0xdf, 
0xe3, 0xac, 0x6a, 0x05, - 0x9e, 0xab, 0x68, 0x7e, 0xf9, 0x1b, 0x04, 0xb3, 0x26, 0x6b, 0x24, 0x91, 0x59, 0x9e, 0x50, 0x55, - 0xdd, 0x7a, 0x5b, 0xf8, 0xae, 0xa3, 0x87, 0xeb, 0x12, 0x67, 0x33, 0xd7, 0xa0, 0x76, 0x85, 0x79, - 0xb6, 0x6e, 0x13, 0x2a, 0x64, 0xb1, 0xfc, 0x5f, 0x6a, 0x34, 0x1d, 0x7e, 0xe8, 0x3f, 0xd3, 0xa5, - 0x1e, 0xe3, 0xcb, 0xdc, 0xf0, 0xed, 0x5b, 0x1b, 0x77, 0x37, 0x8f, 0x0a, 0x1f, 0x97, 0xff, 0x09, - 0x00, 0x00, 0xff, 0xff, 0x50, 0x43, 0x0b, 0xb9, 0x1c, 0x16, 0x00, 0x00, + // 1417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x67, 0x36, 0x69, 0x9b, 0xbc, 0x64, 0xb7, 0xed, 0x64, 0x69, 0xb6, 0x9b, 0x46, 0x4d, 0x9d, + 0x52, 0xa2, 0x94, 0xae, 0xd5, 0xf4, 0x0f, 0x74, 0x53, 0x0e, 0x4d, 0x02, 0x55, 0x44, 0x4b, 0xa3, + 0x4d, 0x4b, 0x51, 0x01, 0xad, 0x1c, 0x7b, 0x62, 0x4c, 0xbd, 0x33, 0x8b, 0xc7, 0x9b, 0x90, 0x46, + 0xbd, 0x54, 0x95, 0x90, 0xe0, 0x84, 0x0a, 0xf4, 0x5e, 0x21, 0x0e, 0x5c, 0x39, 0x20, 0x71, 0x40, + 0xf0, 0x19, 0x80, 0x6f, 0xc0, 0x67, 0xe0, 0x8c, 0x3c, 0x1e, 0x6f, 0x3c, 0xbb, 0xce, 0xda, 0x8e, + 0x80, 0xa3, 0xdf, 0xf8, 0xf7, 0xde, 0x6f, 0xde, 0xbc, 0xf7, 0xe6, 0x67, 0xc3, 0x65, 0x9b, 0x31, + 0xdb, 0x25, 0xba, 0xd1, 0x76, 0x74, 0x4e, 0xbc, 0x2d, 0xc7, 0x24, 0x2d, 0x83, 0x1a, 0x36, 0x69, + 0x11, 0xea, 0xeb, 0x5b, 0x17, 0x55, 0xa3, 0x57, 0x6b, 0x7b, 0xcc, 0x67, 0xf8, 0x74, 0x88, 0xaa, + 0x19, 0x6d, 0xa7, 0xd6, 0x87, 0xaa, 0x6d, 0x5d, 0xac, 0x9e, 0x8a, 0xb9, 0x35, 0x28, 0x65, 0xbe, + 0xe1, 0x3b, 0x8c, 0xf2, 0x10, 0x5e, 0xad, 0xf4, 0x07, 0x95, 0x2b, 0x7a, 0x1a, 0x1d, 0x8f, 0x70, + 0xd6, 0xf1, 0x4c, 0x12, 0xb9, 0x9a, 0x95, 0x00, 0x97, 0x51, 0xdb, 0xeb, 0x50, 0xea, 0x50, 0x5b, + 0x67, 0x6d, 0xe2, 0x29, 0xf1, 0x4e, 0xca, 0x97, 0xc4, 0xd3, 0x46, 0x67, 0x53, 0x37, 0xe8, 0x8e, + 0x5c, 0x9a, 0xe9, 0x5d, 0xda, 0x74, 0x88, 0x6b, 0x35, 0x5b, 0x06, 0x7f, 0x28, 0xdf, 0x38, 0xd5, + 0xfb, 0x06, 0xf7, 0xbd, 0x8e, 0xe9, 0xcb, 0xd5, 0x49, 0xb9, 0xea, 0xb5, 0x4d, 0x9d, 
0xfb, 0x86, + 0xdf, 0x91, 0x31, 0xb5, 0x17, 0x08, 0x26, 0x6e, 0x39, 0xdc, 0x5f, 0x0f, 0x77, 0xc1, 0x1b, 0xe4, + 0xd3, 0x0e, 0xe1, 0x3e, 0xae, 0xc1, 0x44, 0xdb, 0x63, 0x56, 0xc7, 0x24, 0x5e, 0xb3, 0xed, 0xb1, + 0x4f, 0x88, 0xe9, 0x37, 0x1d, 0xab, 0x82, 0x66, 0xd0, 0xdc, 0x68, 0xe3, 0x78, 0xb4, 0xb4, 0x16, + 0xae, 0xac, 0x5a, 0x78, 0x0a, 0x46, 0xdb, 0x86, 0x4d, 0x9a, 0xdc, 0x79, 0x44, 0x2a, 0x87, 0x66, + 0xd0, 0xdc, 0xa1, 0xc6, 0x48, 0x60, 0x58, 0x77, 0x1e, 0x11, 0x3c, 0x0d, 0x20, 0x16, 0x7d, 0xf6, + 0x90, 0xd0, 0xca, 0x61, 0xe1, 0x43, 0xbc, 0x7e, 0x37, 0x30, 0xe0, 0xd3, 0x30, 0x66, 0x32, 0xca, + 0x3b, 0x2d, 0xe2, 0x05, 0x31, 0x8e, 0x88, 0x75, 0x88, 0x4c, 0xab, 0x96, 0xf6, 0x25, 0x82, 0xb2, + 0x4a, 0x92, 0xb7, 0x19, 0xe5, 0x04, 0xbf, 0x03, 0x23, 0x32, 0xfd, 0xbc, 0x82, 0x66, 0x86, 0xe6, + 0xc6, 0x16, 0xf4, 0x5a, 0xca, 0x99, 0xd7, 0x6e, 0x8b, 0x27, 0x4b, 0xfa, 0x6a, 0x74, 0x1d, 0xe0, + 0x73, 0x70, 0x94, 0x92, 0xcf, 0xfc, 0x66, 0x8c, 0x6a, 0x41, 0x50, 0x29, 0x06, 0xe6, 0xb5, 0x88, + 0xae, 0x76, 0x15, 0x8e, 0xdf, 0x24, 0x11, 0x97, 0x28, 0x5f, 0x67, 0x60, 0x5c, 0x3a, 0x6a, 0x52, + 0xa3, 0x45, 0x64, 0xa2, 0xc6, 0xa4, 0xed, 0x5d, 0xa3, 0x45, 0x34, 0x03, 0xca, 0xcb, 0x1e, 0x31, + 0x7c, 0xd2, 0x03, 0x5d, 0x85, 0x23, 0xf2, 0x35, 0x81, 0x3a, 0xc0, 0x1e, 0x22, 0xbc, 0x76, 0x0d, + 0xca, 0x2b, 0xc4, 0x25, 0x7d, 0x21, 0x32, 0xb0, 0x5b, 0x84, 0x13, 0xf7, 0xa8, 0x75, 0x40, 0xb0, + 0x05, 0x93, 0x7d, 0x60, 0x79, 0x44, 0xff, 0xe2, 0xee, 0xfe, 0x44, 0x30, 0xb9, 0x97, 0xf9, 0x65, + 0x46, 0x37, 0x1d, 0x3b, 0x3b, 0xc9, 0xa0, 0x44, 0x4d, 0x81, 0x09, 0x8a, 0x2c, 0x3c, 0xd9, 0x91, + 0xd0, 0xb0, 0x6a, 0xe1, 0xfb, 0x30, 0xbc, 0xe5, 0x90, 0xed, 0xca, 0xd0, 0x0c, 0x9a, 0x2b, 0x2d, + 0x2c, 0xa7, 0x72, 0xdc, 0x87, 0x47, 0x2d, 0x7c, 0x7a, 0xcf, 0x21, 0xdb, 0x0d, 0xe1, 0x50, 0x3b, + 0x03, 0xb0, 0x67, 0xc3, 0xa3, 0x70, 0x68, 0xe9, 0xc6, 0xfa, 0xea, 0xf2, 0xb1, 0x97, 0xf0, 0x08, + 0x0c, 0xbf, 0x7d, 0xef, 0xd6, 0xad, 0x63, 0x48, 0x7b, 0x04, 0x27, 0x63, 0xd5, 0x1d, 0xbe, 0xcd, + 0x73, 0x6c, 0x4c, 0x6d, 
0xaf, 0x42, 0x6f, 0x7b, 0x29, 0xad, 0x39, 0xa4, 0xb6, 0xa6, 0xf6, 0x04, + 0x41, 0x35, 0x29, 0xb8, 0x3c, 0xbd, 0xeb, 0x70, 0x34, 0x8a, 0x1e, 0xa6, 0x2a, 0xea, 0xb3, 0x89, + 0x78, 0x86, 0xa2, 0x93, 0x2a, 0x71, 0xc5, 0x4b, 0xe6, 0x8e, 0xda, 0x85, 0xaa, 0xd2, 0x19, 0xb9, + 0x8f, 0xb6, 0x0e, 0x25, 0x95, 0xa6, 0x88, 0xb3, 0x0f, 0xcb, 0xa2, 0xc2, 0x52, 0xfb, 0x09, 0xc1, + 0xc9, 0xf5, 0xce, 0x46, 0xcb, 0xf1, 0x43, 0xc3, 0xba, 0x18, 0xdc, 0x39, 0x82, 0x37, 0xa0, 0x28, + 0xeb, 0x2a, 0x9c, 0xf9, 0x32, 0xf6, 0x85, 0xd4, 0x1a, 0x52, 0xe2, 0x8d, 0x9b, 0xb1, 0x27, 0x3c, + 0x0b, 0xc5, 0x2d, 0xc3, 0x75, 0x2c, 0xc3, 0x27, 0x4d, 0x46, 0xdd, 0x1d, 0x71, 0x6e, 0x23, 0x8d, + 0xf1, 0xc8, 0x78, 0x87, 0xba, 0x3b, 0xda, 0xfb, 0x50, 0x4d, 0x22, 0x2e, 0x8f, 0xae, 0x3f, 0x27, + 0x28, 0x73, 0x4e, 0x9e, 0x22, 0x98, 0x52, 0x67, 0x15, 0x73, 0x5d, 0xd6, 0xf1, 0x73, 0x64, 0x65, + 0x09, 0x8e, 0x78, 0x21, 0x48, 0xe6, 0x63, 0x2e, 0x35, 0x1f, 0x51, 0x90, 0x08, 0xa8, 0x3d, 0x53, + 0x8b, 0x53, 0xae, 0xff, 0x4f, 0xad, 0x81, 0x4f, 0xc0, 0xe1, 0x4d, 0xc7, 0xf5, 0x89, 0x57, 0x19, + 0x16, 0x38, 0xf9, 0x14, 0xdc, 0x46, 0x53, 0x89, 0xac, 0x64, 0xe2, 0x57, 0x60, 0x44, 0x6e, 0x20, + 0x6a, 0x96, 0xec, 0x5b, 0xef, 0x22, 0x33, 0xf7, 0xce, 0x87, 0x50, 0x89, 0xdd, 0x46, 0xb9, 0x8f, + 0x69, 0x1a, 0x40, 0x86, 0xdc, 0x9b, 0x8a, 0xa3, 0xd2, 0xb2, 0x6a, 0x69, 0x0f, 0xa0, 0xfc, 0x16, + 0x35, 0x36, 0xdc, 0xfc, 0x77, 0x42, 0xef, 0xad, 0x5e, 0xe8, 0xbb, 0xd5, 0x3f, 0x80, 0x97, 0x57, + 0x1c, 0xfe, 0x1f, 0x39, 0xff, 0x1c, 0xc1, 0xd4, 0x4d, 0x42, 0x03, 0x85, 0xd5, 0x1d, 0x27, 0x6d, + 0xe6, 0x75, 0x53, 0x73, 0x09, 0x80, 0x92, 0x6d, 0xb5, 0x33, 0xca, 0xd1, 0x31, 0x45, 0x1a, 0xaa, + 0x76, 0x83, 0xee, 0x34, 0x46, 0x29, 0xd9, 0x0e, 0x3d, 0x04, 0x20, 0xe6, 0x5a, 0xea, 0x88, 0xd9, + 0x07, 0xc4, 0x5c, 0x4b, 0xf6, 0xd2, 0xdf, 0x08, 0x4e, 0x25, 0x33, 0x91, 0xf5, 0x92, 0x61, 0xbb, + 0x25, 0x28, 0x74, 0x77, 0x59, 0x70, 0x2c, 0x7c, 0x17, 0x4a, 0xe6, 0xc7, 0x06, 0xb5, 0x49, 0xd3, + 0x13, 0xbe, 0x78, 0x65, 0x48, 0x14, 0x5a, 0x86, 0x99, 0x23, 
0x60, 0x92, 0x41, 0xd1, 0x8c, 0x3d, + 0x71, 0x7c, 0x1b, 0xc6, 0x2c, 0xc7, 0xb0, 0x29, 0xe3, 0xbe, 0x63, 0xf2, 0xca, 0xb0, 0x70, 0x79, + 0x3e, 0xd5, 0xe5, 0x4a, 0x17, 0xd3, 0x88, 0xe3, 0x17, 0xfe, 0x98, 0x80, 0x92, 0x3c, 0xd9, 0xf0, + 0x46, 0xf7, 0xf0, 0x57, 0x08, 0xc6, 0xe3, 0x42, 0x0e, 0x5f, 0x4e, 0xf5, 0x9e, 0x20, 0x4e, 0xab, + 0x57, 0x72, 0xa2, 0xc2, 0x44, 0x6b, 0xe5, 0x27, 0xbf, 0xff, 0xf5, 0xac, 0x50, 0xc2, 0xe3, 0xb1, + 0x0f, 0x06, 0x8e, 0xbf, 0x45, 0x00, 0x7b, 0x1d, 0x84, 0x17, 0x72, 0x5c, 0xfd, 0x11, 0x9f, 0xbc, + 0x92, 0x46, 0x9b, 0x15, 0x4c, 0xa6, 0xf1, 0x54, 0x9c, 0x89, 0xbe, 0x1b, 0x2f, 0x83, 0xc7, 0xf8, + 0x29, 0x82, 0xa2, 0x32, 0x84, 0x71, 0xfa, 0xbe, 0x93, 0x04, 0x66, 0x75, 0x3a, 0x82, 0xc5, 0xbe, + 0x3e, 0x6a, 0x77, 0xa2, 0xaf, 0x0f, 0x6d, 0x5a, 0x90, 0x99, 0xd4, 0x94, 0xb4, 0xd4, 0x23, 0xd5, + 0x85, 0xbf, 0x40, 0x50, 0x54, 0x44, 0x65, 0x06, 0x1a, 0x49, 0x22, 0x34, 0x8d, 0x86, 0xcc, 0xc9, + 0xfc, 0xc0, 0x9c, 0x3c, 0x47, 0x70, 0xb4, 0x47, 0x69, 0xe2, 0xd7, 0x53, 0xe9, 0x24, 0x0b, 0xdb, + 0x34, 0x42, 0xaf, 0x09, 0x42, 0xe7, 0xb4, 0xb3, 0x03, 0x08, 0xd5, 0x3b, 0xd2, 0x35, 0xfe, 0x19, + 0x01, 0xee, 0x17, 0x52, 0xb8, 0x9e, 0xa7, 0x54, 0x55, 0xe9, 0x57, 0x5d, 0x3c, 0x10, 0x56, 0x16, + 0xfb, 0x79, 0xc1, 0xfe, 0x15, 0x3c, 0x3b, 0x80, 0xbd, 0x2e, 0x35, 0x1d, 0xfe, 0x15, 0xc1, 0xb1, + 0x5e, 0x45, 0x8b, 0xdf, 0x38, 0xa8, 0x08, 0xae, 0x26, 0x49, 0x0c, 0xed, 0x23, 0x41, 0xe8, 0x3e, + 0xd6, 0x33, 0x10, 0xd2, 0x77, 0xbb, 0x4a, 0xfd, 0xf1, 0x83, 0xb3, 0x58, 0x4b, 0x87, 0xe0, 0xef, + 0x10, 0x4c, 0x24, 0x88, 0x48, 0xbc, 0x98, 0xaf, 0x67, 0x32, 0x6c, 0x64, 0x51, 0x6c, 0xe4, 0x8a, + 0x96, 0x25, 0xb3, 0xf5, 0x1e, 0x0d, 0x86, 0xbf, 0x47, 0x80, 0xfb, 0x45, 0x5b, 0x86, 0x32, 0xd9, + 0x57, 0xa2, 0xa6, 0x95, 0xf1, 0x15, 0x41, 0x57, 0xd7, 0xe6, 0xb3, 0xd0, 0xe5, 0x22, 0x4a, 0x1d, + 0xcd, 0xe3, 0x5f, 0xd4, 0x1f, 0x03, 0x91, 0xca, 0xc1, 0xb9, 0x8a, 0xb2, 0x47, 0xb1, 0x55, 0xaf, + 0x1f, 0x0c, 0x2c, 0x4b, 0x5a, 0x36, 0x24, 0x1e, 0xd4, 0x90, 0x7a, 0x57, 0x40, 0xfd, 0x88, 0x94, + 
0xef, 0xf4, 0xd0, 0x8c, 0xaf, 0xe5, 0x19, 0xef, 0x8a, 0x9a, 0xaa, 0x66, 0x56, 0x71, 0xda, 0x35, + 0x41, 0xf4, 0x12, 0xbe, 0x98, 0x85, 0xa8, 0xbe, 0xbb, 0x27, 0xc0, 0x1e, 0xe3, 0x1f, 0x50, 0xef, + 0x5f, 0x02, 0x49, 0xfc, 0x7a, 0xce, 0xd9, 0xaf, 0x72, 0xcf, 0x58, 0x23, 0x99, 0x32, 0x5b, 0x8f, + 0xf4, 0x39, 0xfe, 0x0d, 0x41, 0x39, 0x49, 0xda, 0x64, 0x20, 0x3b, 0x40, 0x9b, 0x55, 0xdf, 0x3c, + 0x20, 0x5a, 0x2d, 0x13, 0xed, 0x8c, 0x72, 0x9f, 0xd9, 0x09, 0x90, 0xa0, 0xce, 0xbf, 0x46, 0x50, + 0x54, 0x24, 0x6e, 0x86, 0xeb, 0x2d, 0x49, 0x12, 0xa7, 0xa5, 0xf8, 0x82, 0x60, 0xf5, 0xaa, 0x36, + 0x68, 0x96, 0xd5, 0x89, 0x70, 0x1c, 0xd0, 0x7a, 0x8e, 0xa0, 0xa4, 0xaa, 0x63, 0x7c, 0x35, 0x83, + 0x12, 0xe3, 0xf9, 0x89, 0xd5, 0x04, 0xb1, 0xb9, 0x81, 0xe3, 0xac, 0x6e, 0x85, 0x9e, 0xeb, 0x68, + 0x7e, 0xe9, 0x1b, 0x04, 0xb3, 0x26, 0x6b, 0xa5, 0x91, 0x59, 0x9a, 0x50, 0xb5, 0xdf, 0x5a, 0x20, + 0x8f, 0xd7, 0xd0, 0x83, 0x35, 0x89, 0xb3, 0x99, 0x6b, 0x50, 0xbb, 0xc6, 0x3c, 0x5b, 0xb7, 0x09, + 0x15, 0xe2, 0x59, 0xfe, 0x55, 0x35, 0xda, 0x0e, 0xdf, 0xf7, 0xcf, 0xea, 0x62, 0x9f, 0xf1, 0x45, + 0x61, 0xf8, 0xe6, 0x8d, 0xf5, 0xdb, 0x1b, 0x87, 0x85, 0x8f, 0x4b, 0xff, 0x04, 0x00, 0x00, 0xff, + 0xff, 0xc1, 0x4b, 0xa8, 0xee, 0x2b, 0x16, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go index 7b82cffab..28e3f4209 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1/cloud_speech.pb.go @@ -13,6 +13,7 @@ It has these top-level messages: StreamingRecognizeRequest StreamingRecognitionConfig RecognitionConfig + GoogleDataCollectionConfig SpeechContext RecognitionAudio RecognizeResponse @@ -56,21 +57,22 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // // All encodings support only 1 channel (mono) audio. 
// -// If you send a `FLAC` or `WAV` audio file format in the request, -// then if you specify an encoding in `AudioEncoding`, it must match the -// encoding described in the audio header. If it does not match, then the -// request returns an -// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code. You can request -// recognition for `WAV` files that contain either `LINEAR16` or `MULAW` -// encoded audio. -// For audio file formats other than `FLAC` or `WAV`, you must -// specify the audio encoding in your `RecognitionConfig`. -// // For best results, the audio source should be captured and transmitted using // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech -// recognition can be reduced if lossy codecs, which include the other codecs -// listed in this section, are used to capture or transmit the audio, -// particularly if background noise is present. +// recognition can be reduced if lossy codecs are used to capture or transmit +// audio, particularly if background noise is present. Lossy codecs include +// `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`. +// +// The `FLAC` and `WAV` audio file formats include a header that describes the +// included audio content. You can request recognition for `WAV` files that +// contain either `LINEAR16` or `MULAW` encoded audio. +// If you send `FLAC` or `WAV` audio file format in +// your request, you do not need to specify an `AudioEncoding`; the audio +// encoding format is determined from the file header. If you specify +// an `AudioEncoding` when you send send `FLAC` or `WAV` audio, the +// encoding configuration must match the encoding described in the audio +// header; otherwise the request returns an +// [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error code. 
type RecognitionConfig_AudioEncoding int32 const ( @@ -78,7 +80,7 @@ const ( RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0 // Uncompressed 16-bit signed little-endian samples (Linear PCM). RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1 - // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio + // `FLAC` (Free Lossless Audio // Codec) is the recommended encoding because it is // lossless--therefore recognition is not compromised--and // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream @@ -139,6 +141,32 @@ func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } +// Speech content will not be logged until authorized consent is opted in. +// Once it is opted in, this flag enables/disables logging to override that +// consent. default = ENABLED (logging due to consent). +type GoogleDataCollectionConfig_LoggingConsentState int32 + +const ( + GoogleDataCollectionConfig_ENABLED GoogleDataCollectionConfig_LoggingConsentState = 0 + GoogleDataCollectionConfig_DISABLED GoogleDataCollectionConfig_LoggingConsentState = 1 +) + +var GoogleDataCollectionConfig_LoggingConsentState_name = map[int32]string{ + 0: "ENABLED", + 1: "DISABLED", +} +var GoogleDataCollectionConfig_LoggingConsentState_value = map[string]int32{ + "ENABLED": 0, + "DISABLED": 1, +} + +func (x GoogleDataCollectionConfig_LoggingConsentState) String() string { + return proto.EnumName(GoogleDataCollectionConfig_LoggingConsentState_name, int32(x)) +} +func (GoogleDataCollectionConfig_LoggingConsentState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 0} +} + // Indicates the type of speech event. 
type StreamingRecognizeResponse_SpeechEventType int32 @@ -168,7 +196,7 @@ func (x StreamingRecognizeResponse_SpeechEventType) String() string { return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x)) } func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{10, 0} + return fileDescriptor0, []int{11, 0} } // The top-level message sent by the client for the `Recognize` method. @@ -406,13 +434,17 @@ func (m *StreamingRecognitionConfig) GetInterimResults() bool { // Provides information to the recognizer that specifies how to process the // request. type RecognitionConfig struct { - // *Required* Encoding of audio data sent in all `RecognitionAudio` messages. + // Encoding of audio data sent in all `RecognitionAudio` messages. + // This field is optional for `FLAC` and `WAV` audio files and required + // for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"` - // *Required* Sample rate in Hertz of the audio data sent in all + // Sample rate in Hertz of the audio data sent in all // `RecognitionAudio` messages. Valid values are: 8000-48000. // 16000 is optimal. For best results, set the sampling rate of the audio // source to 16000 Hz. If that's not possible, use the native sample rate of // the audio source (instead of re-sampling). + // This field is optional for `FLAC` and `WAV` audio files and required + // for all other audio formats. For details, see [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding]. 
SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` // *Required* The language of the supplied audio as a // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. @@ -443,7 +475,54 @@ type RecognitionConfig struct { // best suited to your domain to get best results. If a model is not // explicitly specified, then we auto-select a model based on the parameters // in the RecognitionConfig. + //
LanguageSince Build SystemsLang/Lib LevelsLang/Lib Levels (Tested) Low-Level Transports Transport Wrappers Protocols
ActionScript0.3.0 ActionScript 3 Yes
C (glib)0.6.0 YesYes2.40.22.54.02.48.22.54.0 YesYesYesYes Yes YesYesYes
C++0.2.0 YesYesC++98, gcc C++98 YesYesYesYesYesYes YesYesYes YesYesYesYes
C#0.2.0 Yes .NET 3.5 / mono 3.2.8.0.NET 4.6.1 / mono 4.6.2.7 YesYesYesYes
Cocoa0.2.0 unknown YesYesYesYesCocoa
DCommon Lisp0.12.0 Yes2.070.22.076.0SBCL 1.4.5YesYesYesYesYesYesYesCommon Lisp
Dlang0.9.0Yes2.073.22.077.1 YesYesYesYesYes YesYesYes YesYesYes
Dart0.10.0 Yes1.20.11.24.21.22.11.24.3 Yes YesYes YesYesYesYes
Delphi0.8.0 2010unknown Yes
.NET Core2.0.02.0.30.11.0Yes2.1.4 YesYesYesYes YesYes YesYesYesYes
Erlang0.3.0 YesR16B0320.0.418.320.0.4 YesYesYesYes YesYes YesYesYesYes
Go0.7.0 Yes1.2.11.8.31.7.61.10 YesYesYes YesYesYes YesYesYesYes
Haskell0.5.0 YesYes7.6.38.0.27.10.38.0.2 YesYesYesYes YesYes YesYesYes
Haxe0.9.3 Yes3.2.13.2.13.4.4 YesYes YesYes YesYesYesYes
Java (SE)0.2.0 YesYes1.7.0_1511.8.0_1441.8.0_151 YesYesYesYes YesYesYes YesYesYesYes
Java (ME)0.5.0 unknown YesYesYes
Javascript0.3.0 Yes unknown YesYes
Lua0.9.2 Yes 5.1.55.2.4 YesYes
node.js0.6.0 Yes4.2.68.9.1YesYes6.x8.xYesYesYes YesYes YesYesYesYes Yes
OCaml0.2.0 4.02.34.04.04.04.0 Yes Yes Yes
Perl0.2.0 Yes5.18.25.26.05.22.15.26.0 YesYesYesYes YesYes YesYesYes
PHP0.2.0 Yes5.5.97.1.87.0.227.1.8 YesYesYes YesYes YesYesYesYes
Python0.2.0 YesYes2.7.6, 3.4.32.7.14, 3.6.32.7.12, 3.5.22.7.14, 3.6.3 YesYes YesYes YesYesYesYes
Ruby0.2.0 Yes1.9.3p4842.3.3p2222.3.1p1122.3.3p222 YesYesYesYes YesYes YesYesYesYes
Rust0.11.0 Yes1.15.11.18.01.17.01.21.0 YesYes Yes YesYesYes
Smalltalk0.2.0 unknown Yes
LanguageSince autoconfcmake MinMax Domain File Memory Pipe Socket TLS 
Build SystemsLang/Lib LevelsLang/Lib Levels (Tested) Low-Level Transports Transport Wrappers Protocols
+ // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + //
ModelDescription
command_and_searchBest for short queries such as voice commands or voice search.
phone_callBest for audio that originated from a phone call (typically + // recorded at an 8khz sampling rate).
videoBest for audio that originated from from video or includes multiple + // speakers. Ideally the audio is recorded at a 16khz or greater + // sampling rate. This is a premium model that costs more than the + // standard rate.
defaultBest for audio that is not one of the specific audio models. + // For example, long-form audio. Ideally the audio is high-fidelity, + // recorded at a 16khz or greater sampling rate.
Model string `protobuf:"bytes,13,opt,name=model" json:"model,omitempty"` + // *Optional* Set to true to use an enhanced model for speech recognition. + // You must also set the `model` field to a valid, enhanced model. If + // `use_enhanced` is set to true and the `model` field is not set, then + // `use_enhanced` is ignored. If `use_enhanced` is true and an enhanced + // version of the specified model does not exist, then the speech is + // recognized using the standard version of the specified model. + // + // Enhanced speech models require that you enable audio logging for + // your request. To enable audio logging, set the `loggingConsentState` field + // to ENABLED in the [GoogleDataCollectionConfig][] section of your request. + // You must also opt-in to the audio logging alpha using the instructions in + // the [alpha documentation](/speech/data-sharing). If you set `use_enhanced` + // to true and you have not enabled audio logging, then you will receive + // an error. + UseEnhanced bool `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced" json:"use_enhanced,omitempty"` + // *Optional* Contains settings to opt-in to allow Google to + // collect and use data from this request to improve Google's products and + // services. + GoogleDataCollectionOptIn *GoogleDataCollectionConfig `protobuf:"bytes,10,opt,name=google_data_collection_opt_in,json=googleDataCollectionOptIn" json:"google_data_collection_opt_in,omitempty"` } func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} } @@ -507,6 +586,37 @@ func (m *RecognitionConfig) GetModel() string { return "" } +func (m *RecognitionConfig) GetUseEnhanced() bool { + if m != nil { + return m.UseEnhanced + } + return false +} + +func (m *RecognitionConfig) GetGoogleDataCollectionOptIn() *GoogleDataCollectionConfig { + if m != nil { + return m.GoogleDataCollectionOptIn + } + return nil +} + +// Google data collection opt-in settings. 
+type GoogleDataCollectionConfig struct { + LoggingConsentState GoogleDataCollectionConfig_LoggingConsentState `protobuf:"varint,1,opt,name=logging_consent_state,json=loggingConsentState,enum=google.cloud.speech.v1p1beta1.GoogleDataCollectionConfig_LoggingConsentState" json:"logging_consent_state,omitempty"` +} + +func (m *GoogleDataCollectionConfig) Reset() { *m = GoogleDataCollectionConfig{} } +func (m *GoogleDataCollectionConfig) String() string { return proto.CompactTextString(m) } +func (*GoogleDataCollectionConfig) ProtoMessage() {} +func (*GoogleDataCollectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GoogleDataCollectionConfig) GetLoggingConsentState() GoogleDataCollectionConfig_LoggingConsentState { + if m != nil { + return m.LoggingConsentState + } + return GoogleDataCollectionConfig_ENABLED +} + // Provides "hints" to the speech recognizer to favor specific words and phrases // in the results. type SpeechContext struct { @@ -522,7 +632,7 @@ type SpeechContext struct { func (m *SpeechContext) Reset() { *m = SpeechContext{} } func (m *SpeechContext) String() string { return proto.CompactTextString(m) } func (*SpeechContext) ProtoMessage() {} -func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *SpeechContext) GetPhrases() []string { if m != nil { @@ -548,7 +658,7 @@ type RecognitionAudio struct { func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} } func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) } func (*RecognitionAudio) ProtoMessage() {} -func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*RecognitionAudio) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } type isRecognitionAudio_AudioSource interface { isRecognitionAudio_AudioSource() @@ -663,7 +773,7 @@ type RecognizeResponse 
struct { func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} } func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) } func (*RecognizeResponse) ProtoMessage() {} -func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*RecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult { if m != nil { @@ -686,7 +796,7 @@ type LongRunningRecognizeResponse struct { func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} } func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) } func (*LongRunningRecognizeResponse) ProtoMessage() {} -func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult { if m != nil { @@ -711,7 +821,7 @@ type LongRunningRecognizeMetadata struct { func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} } func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) } func (*LongRunningRecognizeMetadata) ProtoMessage() {} -func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 { if m != nil { @@ -799,7 +909,7 @@ type StreamingRecognizeResponse struct { func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} } func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) } func (*StreamingRecognizeResponse) ProtoMessage() {} -func (*StreamingRecognizeResponse) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{10} } +func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *StreamingRecognizeResponse) GetError() *google_rpc.Status { if m != nil { @@ -847,7 +957,7 @@ type StreamingRecognitionResult struct { func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} } func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) } func (*StreamingRecognitionResult) ProtoMessage() {} -func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { if m != nil { @@ -882,7 +992,7 @@ type SpeechRecognitionResult struct { func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} } func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) } func (*SpeechRecognitionResult) ProtoMessage() {} -func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative { if m != nil { @@ -910,7 +1020,7 @@ type SpeechRecognitionAlternative struct { func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} } func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) } func (*SpeechRecognitionAlternative) ProtoMessage() {} -func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *SpeechRecognitionAlternative) GetTranscript() string { if m != nil { @@ 
-956,7 +1066,7 @@ type WordInfo struct { func (m *WordInfo) Reset() { *m = WordInfo{} } func (m *WordInfo) String() string { return proto.CompactTextString(m) } func (*WordInfo) ProtoMessage() {} -func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *WordInfo) GetStartTime() *google_protobuf3.Duration { if m != nil { @@ -985,6 +1095,7 @@ func init() { proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest") proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig") proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig") + proto.RegisterType((*GoogleDataCollectionConfig)(nil), "google.cloud.speech.v1p1beta1.GoogleDataCollectionConfig") proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext") proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio") proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse") @@ -996,6 +1107,7 @@ func init() { proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative") proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo") proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value) + proto.RegisterEnum("google.cloud.speech.v1p1beta1.GoogleDataCollectionConfig_LoggingConsentState", GoogleDataCollectionConfig_LoggingConsentState_name, GoogleDataCollectionConfig_LoggingConsentState_value) proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value) } 
@@ -1189,89 +1301,98 @@ var _Speech_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1343 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xbb, 0x6f, 0x5b, 0xb7, - 0x1a, 0xf7, 0xb1, 0x2c, 0x3f, 0xbe, 0xf8, 0x21, 0xf3, 0xe6, 0xde, 0xc8, 0x8a, 0x93, 0xeb, 0x7b, - 0x82, 0x9b, 0x38, 0x69, 0x21, 0xd9, 0x6e, 0x91, 0xe6, 0x81, 0x16, 0x90, 0xe5, 0x63, 0x4b, 0x80, - 0x2d, 0x1b, 0xb4, 0x5c, 0xb7, 0x59, 0x08, 0x5a, 0xa2, 0x94, 0x03, 0x48, 0xe4, 0x29, 0x49, 0xa5, - 0xb1, 0xc7, 0xae, 0x45, 0xbb, 0x14, 0xe8, 0xd6, 0xa9, 0x9d, 0x3b, 0x76, 0xe8, 0xd2, 0x3d, 0x6b, - 0x97, 0x8e, 0x1d, 0xfa, 0x47, 0x74, 0x2c, 0x48, 0x9e, 0x23, 0x4b, 0x7e, 0x27, 0x68, 0x80, 0x6e, - 0x87, 0xbf, 0xef, 0xc1, 0x1f, 0x3f, 0x7e, 0x0f, 0x1e, 0x58, 0x6a, 0x09, 0xd1, 0x6a, 0xb3, 0x42, - 0xbd, 0x2d, 0xba, 0x8d, 0x82, 0x8a, 0x18, 0xab, 0x3f, 0x2f, 0xbc, 0x58, 0x8e, 0x96, 0x0f, 0x98, - 0xa6, 0xcb, 0x0e, 0x26, 0x0e, 0xce, 0x47, 0x52, 0x68, 0x81, 0x6e, 0x39, 0x8b, 0xbc, 0x15, 0xe5, - 0x63, 0x51, 0xcf, 0x22, 0x37, 0x1f, 0x3b, 0xa4, 0x51, 0x58, 0xa0, 0x9c, 0x0b, 0x4d, 0x75, 0x28, - 0xb8, 0x72, 0xc6, 0xb9, 0x3b, 0xb1, 0xb4, 0x2d, 0x78, 0x4b, 0x76, 0x39, 0x0f, 0x79, 0xab, 0x20, - 0x22, 0x26, 0x07, 0x94, 0xe6, 0x62, 0x25, 0xbb, 0x3a, 0xe8, 0x36, 0x0b, 0x94, 0x1f, 0xc6, 0xa2, - 0xdb, 0x27, 0x45, 0x8d, 0xae, 0xb3, 0x8d, 0xe5, 0xff, 0x3d, 0x29, 0xd7, 0x61, 0x87, 0x29, 0x4d, - 0x3b, 0x51, 0xac, 0x70, 0x23, 0x56, 0x90, 0x51, 0xbd, 0xa0, 0x34, 0xd5, 0xdd, 0x78, 0x53, 0xff, - 0x7b, 0x0f, 0x32, 0x98, 0xd5, 0x45, 0x8b, 0x87, 0x47, 0x0c, 0xb3, 0xcf, 0xba, 0x4c, 0x69, 0x54, - 0x86, 0xd1, 0xba, 0xe0, 0xcd, 0xb0, 0x95, 0xf5, 0x16, 0xbc, 0xc5, 0x6b, 0x2b, 0x4b, 0xf9, 0x0b, - 0x0f, 0x9f, 0x8f, 0x1d, 0x18, 0x42, 0x25, 0x6b, 0x87, 0x63, 0x7b, 0x14, 0x40, 0x9a, 0x76, 0x1b, - 0xa1, 0xc8, 0x0e, 0x5b, 0x47, 0x85, 0xab, 0x3b, 0x2a, 0x1a, 0x33, 0xec, 0xac, 
0xfd, 0x1f, 0x3d, - 0xb8, 0xb9, 0x29, 0x78, 0x0b, 0xbb, 0xd8, 0xfd, 0xf3, 0x09, 0xff, 0xe2, 0xc1, 0xdc, 0xae, 0x96, - 0x8c, 0x76, 0xce, 0xa2, 0xdb, 0x84, 0x8c, 0x4a, 0x84, 0x64, 0x80, 0xf8, 0xe3, 0x4b, 0xf6, 0x3b, - 0xe9, 0xf3, 0xf8, 0x04, 0xe5, 0x21, 0x3c, 0xd3, 0x73, 0xea, 0x20, 0xf4, 0x7f, 0x98, 0xb2, 0x74, - 0xcc, 0x1e, 0x9a, 0x71, 0x6d, 0x0f, 0x35, 0x59, 0x1e, 0xc2, 0x93, 0x16, 0x2e, 0x39, 0x74, 0xf5, - 0x5f, 0x30, 0x7b, 0x4c, 0x47, 0x3a, 0x8e, 0xfe, 0xcf, 0x1e, 0xe4, 0xce, 0xdf, 0xed, 0x6f, 0x8c, - 0xf8, 0x7d, 0xc8, 0xa8, 0x90, 0xb7, 0xda, 0x8c, 0x74, 0xb5, 0x66, 0x92, 0xf2, 0x3a, 0xb3, 0x3c, - 0xc7, 0xf1, 0x8c, 0xc3, 0xf7, 0x12, 0x18, 0xdd, 0x83, 0x99, 0x90, 0x6b, 0x26, 0xc3, 0x0e, 0x91, - 0x4c, 0x75, 0xdb, 0x5a, 0x65, 0x53, 0x56, 0x73, 0x3a, 0x86, 0xb1, 0x43, 0xfd, 0x57, 0x23, 0x30, - 0x7b, 0x9a, 0xf3, 0x33, 0x18, 0x67, 0xbc, 0x2e, 0x1a, 0x21, 0x77, 0xac, 0xa7, 0x57, 0x3e, 0x7a, - 0x5d, 0xd6, 0x79, 0x7b, 0xcb, 0x41, 0xec, 0x05, 0xf7, 0xfc, 0xa1, 0x07, 0x30, 0xab, 0x68, 0x27, - 0x6a, 0x33, 0x22, 0xa9, 0x66, 0xe4, 0x39, 0x93, 0xfa, 0xc8, 0x1e, 0x23, 0x8d, 0x67, 0x9c, 0x00, - 0x53, 0xcd, 0xca, 0x06, 0x46, 0x77, 0x60, 0xaa, 0x4d, 0x79, 0xab, 0x4b, 0x5b, 0x8c, 0xd4, 0x45, - 0x83, 0xd9, 0x43, 0x4c, 0xe0, 0xc9, 0x04, 0x2c, 0x89, 0x06, 0x33, 0x61, 0xe9, 0xd0, 0x97, 0x84, - 0xb6, 0x35, 0x93, 0x9c, 0xea, 0xf0, 0x05, 0x53, 0xd9, 0x11, 0xe7, 0xaf, 0x43, 0x5f, 0x16, 0xfb, - 0x60, 0xa3, 0x1a, 0x49, 0xd1, 0xa4, 0x3c, 0xd4, 0x87, 0xa4, 0x19, 0x1a, 0x51, 0x36, 0xed, 0x22, - 0xd8, 0xc3, 0xd7, 0x2d, 0x8c, 0xf6, 0x60, 0xc6, 0x1d, 0xd2, 0xa5, 0xc4, 0x4b, 0xad, 0xb2, 0xa3, - 0x0b, 0xa9, 0xc5, 0x6b, 0x2b, 0xef, 0x5e, 0x96, 0x78, 0x16, 0x28, 0x39, 0x23, 0x3c, 0xad, 0xfa, - 0x97, 0x0a, 0x7d, 0x00, 0x59, 0xc6, 0xe9, 0x41, 0x9b, 0x91, 0xcf, 0x85, 0x6c, 0x10, 0xd3, 0x7d, - 0x88, 0x68, 0x36, 0x15, 0xd3, 0x2a, 0x3b, 0x6e, 0x99, 0xfc, 0xdb, 0xc9, 0xf7, 0x85, 0x6c, 0xd4, - 0xc2, 0x0e, 0xdb, 0x76, 0x42, 0x74, 0x1d, 0xd2, 0x1d, 0xd1, 0x60, 0xed, 0xec, 0x94, 0x0d, 0x81, - 0x5b, 0xf8, 0x5f, 
0x7a, 0x30, 0x35, 0x10, 0x68, 0x94, 0x85, 0xeb, 0x41, 0xb5, 0xb4, 0xbd, 0x56, - 0xa9, 0x6e, 0x90, 0xbd, 0xea, 0xee, 0x4e, 0x50, 0xaa, 0xac, 0x57, 0x82, 0xb5, 0xcc, 0x10, 0x9a, - 0x84, 0xf1, 0xcd, 0x4a, 0x35, 0x28, 0xe2, 0xe5, 0x87, 0x19, 0x0f, 0x8d, 0xc3, 0xc8, 0xfa, 0x66, - 0xb1, 0x94, 0x19, 0x46, 0x13, 0x90, 0xde, 0xda, 0xdb, 0x2c, 0xee, 0x67, 0x52, 0x68, 0x0c, 0x52, - 0xc5, 0x2d, 0x9c, 0x19, 0x41, 0x00, 0xa3, 0xc5, 0x2d, 0x4c, 0xf6, 0x57, 0x33, 0x69, 0x63, 0xb7, - 0xbd, 0xb1, 0x41, 0xb6, 0x77, 0xf6, 0x76, 0x33, 0xa3, 0x28, 0x07, 0xff, 0xd9, 0xdd, 0x09, 0x82, - 0x4f, 0xc8, 0x7e, 0xa5, 0x56, 0x26, 0xe5, 0xa0, 0xb8, 0x16, 0x60, 0xb2, 0xfa, 0x69, 0x2d, 0xc8, - 0x8c, 0xf9, 0xf7, 0x61, 0x6a, 0xe0, 0xf4, 0x28, 0x0b, 0x63, 0xd1, 0x73, 0x49, 0x15, 0x53, 0x59, - 0x6f, 0x21, 0xb5, 0x38, 0x81, 0x93, 0xa5, 0x8f, 0x7b, 0xcd, 0xb4, 0xd7, 0x11, 0x50, 0x0e, 0xc6, - 0x92, 0xf2, 0xf3, 0xe2, 0xf2, 0x4b, 0x00, 0x84, 0x20, 0xd5, 0x95, 0xa1, 0xcd, 0x93, 0x89, 0xf2, - 0x10, 0x36, 0x8b, 0xd5, 0x69, 0x70, 0xd5, 0x49, 0x94, 0xe8, 0xca, 0x3a, 0xf3, 0x59, 0x2f, 0x95, - 0x4d, 0x03, 0x51, 0x91, 0xe0, 0x8a, 0xa1, 0x1d, 0x18, 0x4b, 0x2a, 0x60, 0xd8, 0xde, 0xdf, 0xc3, - 0x2b, 0xdd, 0x5f, 0x1f, 0x39, 0x57, 0x2a, 0x38, 0x71, 0xe3, 0x47, 0x30, 0x7f, 0x76, 0x87, 0x7d, - 0x6b, 0x3b, 0xbe, 0xf2, 0xce, 0xde, 0x72, 0x8b, 0x69, 0xda, 0xa0, 0x9a, 0xc6, 0x79, 0xdd, 0x92, - 0x4c, 0x29, 0x12, 0x31, 0x59, 0x4f, 0x42, 0x98, 0xb6, 0x79, 0x6d, 0xf1, 0x1d, 0x07, 0xa3, 0xc7, - 0x00, 0x4a, 0x53, 0xa9, 0x6d, 0xea, 0xc5, 0xbd, 0x3b, 0x97, 0x10, 0x4c, 0xa6, 0x62, 0xbe, 0x96, - 0x4c, 0x45, 0x3c, 0x61, 0xb5, 0xcd, 0x1a, 0xad, 0x41, 0xa6, 0x4d, 0x95, 0x26, 0xdd, 0xa8, 0x61, - 0x2a, 0xd7, 0x3a, 0x48, 0x5d, 0xea, 0x60, 0xda, 0xd8, 0xec, 0x59, 0x13, 0x03, 0xfa, 0xbf, 0x0f, - 0x9f, 0x6e, 0x97, 0x7d, 0xd1, 0x5b, 0x84, 0x34, 0x93, 0x52, 0xc8, 0xb8, 0x5b, 0xa2, 0xc4, 0xb3, - 0x8c, 0xea, 0xf9, 0x5d, 0x3b, 0x8f, 0xb1, 0x53, 0x40, 0xbb, 0x27, 0xe3, 0xfc, 0x26, 0x23, 0xe1, - 0x44, 0xa8, 0x51, 0x17, 0x66, 0xe3, 0xb2, 0x67, 0x2f, 
0x18, 0xd7, 0x44, 0x1f, 0x46, 0xcc, 0x76, - 0x93, 0xe9, 0x95, 0xca, 0x6b, 0xba, 0x3f, 0x3e, 0x54, 0x7c, 0xc3, 0x81, 0xf1, 0x58, 0x3b, 0x8c, - 0x18, 0x8e, 0x5b, 0x4b, 0x0f, 0xf0, 0x37, 0x61, 0xe6, 0x84, 0x0e, 0x9a, 0x87, 0xac, 0x29, 0xb4, - 0x52, 0x99, 0x04, 0x1f, 0x07, 0xd5, 0xda, 0x89, 0x62, 0xbe, 0x09, 0x37, 0x82, 0xea, 0x1a, 0xd9, - 0x5e, 0x27, 0xbb, 0x95, 0xea, 0xc6, 0x66, 0x40, 0xf6, 0x6a, 0xb5, 0x00, 0x17, 0xab, 0xa5, 0x20, - 0xe3, 0xf9, 0x3f, 0x9d, 0x33, 0x91, 0xdc, 0x61, 0x11, 0x81, 0xc9, 0x81, 0x66, 0xe9, 0xd9, 0xe8, - 0x3d, 0x7d, 0xdd, 0x2c, 0xed, 0xeb, 0xac, 0x78, 0xc0, 0x21, 0x9a, 0x83, 0xf1, 0x50, 0x91, 0x66, - 0xc8, 0x69, 0x3b, 0x1e, 0x50, 0x63, 0xa1, 0x5a, 0x37, 0x4b, 0x34, 0x0f, 0x26, 0xa1, 0x0e, 0xc2, - 0x76, 0xa8, 0x0f, 0x6d, 0xf2, 0x0c, 0xe3, 0x63, 0xc0, 0x3f, 0x82, 0x1b, 0xe7, 0x14, 0xc3, 0x5b, - 0x27, 0xed, 0x7f, 0xe7, 0xc1, 0xfc, 0x45, 0xea, 0xe8, 0x36, 0x80, 0x96, 0x94, 0xab, 0xba, 0x0c, - 0x23, 0x57, 0x5e, 0x13, 0xb8, 0x0f, 0x31, 0x72, 0x3b, 0xa8, 0x1b, 0x2c, 0x19, 0xcc, 0xc3, 0xb8, - 0x0f, 0x41, 0x1f, 0x42, 0xda, 0xf4, 0x7c, 0x33, 0x89, 0x0d, 0xf5, 0x7b, 0x97, 0x50, 0x37, 0x03, - 0xa0, 0xc2, 0x9b, 0x02, 0x3b, 0x2b, 0xff, 0x6b, 0x0f, 0xc6, 0x13, 0x0c, 0x3d, 0x1a, 0xa8, 0x62, - 0x57, 0x2a, 0x73, 0xa7, 0x8a, 0x70, 0x2d, 0x7e, 0xfb, 0xf6, 0x17, 0xf1, 0xfb, 0x66, 0xb4, 0x37, - 0xfa, 0xab, 0xff, 0x02, 0xbb, 0x31, 0xc6, 0xed, 0x10, 0x42, 0x08, 0x46, 0x0c, 0x8b, 0x78, 0xfe, - 0xda, 0xef, 0x95, 0xdf, 0x52, 0x30, 0xea, 0x02, 0x86, 0xbe, 0xf5, 0x60, 0xa2, 0x97, 0xf5, 0xe8, - 0x8a, 0x4f, 0xc1, 0xde, 0x2b, 0x2f, 0xb7, 0x74, 0x75, 0x03, 0x57, 0x50, 0xfe, 0xdd, 0x2f, 0x7e, - 0xfd, 0xe3, 0x9b, 0xe1, 0x05, 0xff, 0x66, 0xdf, 0xaf, 0x88, 0x33, 0x7b, 0x22, 0x13, 0xe5, 0x27, - 0xde, 0x03, 0xf4, 0x83, 0x07, 0xd7, 0xcf, 0xea, 0x9c, 0xe8, 0xc9, 0x25, 0x5b, 0x5e, 0xf0, 0x86, - 0xce, 0xdd, 0x4a, 0x6c, 0xfb, 0x7e, 0x52, 0xf2, 0xdb, 0xc9, 0x4f, 0x8a, 0xbf, 0x6c, 0xb9, 0xbd, - 0xe3, 0xdf, 0x3d, 0xcd, 0xad, 0xcf, 0x60, 0x80, 0xe6, 0x57, 0x1e, 0xa0, 0xd3, 0xed, 0x03, 
0x3d, - 0x7a, 0x83, 0x8e, 0xe3, 0x28, 0x3e, 0x7e, 0xe3, 0x5e, 0xb5, 0xe8, 0x2d, 0x79, 0xab, 0x47, 0xf0, - 0xbf, 0xba, 0xe8, 0x5c, 0xec, 0x63, 0xf5, 0x9a, 0xbb, 0xfc, 0x1d, 0x93, 0x35, 0x3b, 0xde, 0xb3, - 0x52, 0xac, 0xdd, 0x12, 0xe6, 0x75, 0x96, 0x17, 0xb2, 0x55, 0x68, 0x31, 0x6e, 0x73, 0xaa, 0xe0, - 0x44, 0x34, 0x0a, 0xd5, 0x39, 0xff, 0x91, 0x4f, 0x1d, 0xf0, 0xa7, 0xe7, 0x1d, 0x8c, 0x5a, 0x93, - 0xf7, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x7b, 0x24, 0x37, 0x79, 0x0e, 0x00, 0x00, + // 1477 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xbd, 0x6f, 0x1b, 0x47, + 0x16, 0xd7, 0x8a, 0xa2, 0x28, 0x3d, 0x7d, 0x51, 0x63, 0xfb, 0x4c, 0xd1, 0xb2, 0x4f, 0x5e, 0xe3, + 0x6c, 0xd9, 0x77, 0x20, 0x25, 0xdd, 0xc1, 0xe7, 0x0f, 0xdc, 0x01, 0x14, 0xb9, 0x12, 0x09, 0x50, + 0x94, 0x30, 0xa4, 0x4e, 0x77, 0x6e, 0x06, 0x23, 0x72, 0xb8, 0x5a, 0x60, 0xb9, 0xbb, 0xb7, 0x33, + 0xeb, 0x58, 0x4a, 0x95, 0xb4, 0x41, 0xd2, 0x04, 0x48, 0x97, 0x2a, 0xa9, 0x53, 0xa6, 0x48, 0x93, + 0x3e, 0x65, 0xd2, 0xa4, 0x4c, 0x91, 0x2a, 0x7f, 0x41, 0xca, 0x60, 0x66, 0x76, 0x29, 0x52, 0xdf, + 0x16, 0x62, 0x20, 0xdd, 0xce, 0xef, 0x7d, 0xcc, 0x9b, 0xb7, 0xf3, 0x7e, 0xef, 0x0d, 0xac, 0xd8, + 0xbe, 0x6f, 0xbb, 0xac, 0xd8, 0x76, 0xfd, 0xa8, 0x53, 0xe4, 0x01, 0x63, 0xed, 0x83, 0xe2, 0xeb, + 0xd5, 0x60, 0x75, 0x9f, 0x09, 0xba, 0xaa, 0x61, 0xa2, 0xe1, 0x42, 0x10, 0xfa, 0xc2, 0x47, 0x77, + 0xb5, 0x45, 0x41, 0x89, 0x0a, 0xb1, 0xa8, 0x6f, 0x91, 0x5f, 0x8c, 0x1d, 0xd2, 0xc0, 0x29, 0x52, + 0xcf, 0xf3, 0x05, 0x15, 0x8e, 0xef, 0x71, 0x6d, 0x9c, 0x7f, 0x10, 0x4b, 0x5d, 0xdf, 0xb3, 0xc3, + 0xc8, 0xf3, 0x1c, 0xcf, 0x2e, 0xfa, 0x01, 0x0b, 0x87, 0x94, 0x16, 0x62, 0x25, 0xb5, 0xda, 0x8f, + 0xba, 0x45, 0xea, 0x1d, 0xc6, 0xa2, 0x7b, 0x27, 0x45, 0x9d, 0x48, 0xdb, 0xc6, 0xf2, 0x3f, 0x9f, + 0x94, 0x0b, 0xa7, 0xc7, 0xb8, 0xa0, 0xbd, 0x20, 0x56, 0xb8, 0x1d, 0x2b, 0x84, 0x41, 0xbb, 0xc8, + 0x05, 0x15, 0x51, 0xbc, 0xa9, 0xf9, 0x85, 0x01, 0x59, 0xcc, 0xda, 0xbe, 0xed, 0x39, 
0x47, 0x0c, + 0xb3, 0xff, 0x47, 0x8c, 0x0b, 0x54, 0x85, 0xf1, 0xb6, 0xef, 0x75, 0x1d, 0x3b, 0x67, 0x2c, 0x19, + 0xcb, 0x53, 0x6b, 0x2b, 0x85, 0x0b, 0x0f, 0x5f, 0x88, 0x1d, 0xc8, 0x80, 0xca, 0xca, 0x0e, 0xc7, + 0xf6, 0xc8, 0x82, 0x34, 0x8d, 0x3a, 0x8e, 0x9f, 0x1b, 0x55, 0x8e, 0x8a, 0x57, 0x77, 0x54, 0x92, + 0x66, 0x58, 0x5b, 0x9b, 0x5f, 0x19, 0x70, 0xa7, 0xee, 0x7b, 0x36, 0xd6, 0xb9, 0xfb, 0xe3, 0x07, + 0xfc, 0xad, 0x01, 0x0b, 0x4d, 0x11, 0x32, 0xda, 0x3b, 0x2b, 0xdc, 0x2e, 0x64, 0x79, 0x22, 0x24, + 0x43, 0x81, 0x3f, 0xbf, 0x64, 0xbf, 0x93, 0x3e, 0x8f, 0x4f, 0x50, 0x1d, 0xc1, 0x73, 0x7d, 0xa7, + 0x1a, 0x42, 0x7f, 0x81, 0x19, 0x15, 0x8e, 0xdc, 0x43, 0x30, 0x4f, 0xa8, 0x43, 0x4d, 0x57, 0x47, + 0xf0, 0xb4, 0x82, 0xcb, 0x1a, 0x5d, 0xbf, 0x01, 0xf3, 0xc7, 0xe1, 0x84, 0x3a, 0x46, 0xf3, 0x1b, + 0x03, 0xf2, 0xe7, 0xef, 0xf6, 0x3b, 0x66, 0xfc, 0x31, 0x64, 0xb9, 0xe3, 0xd9, 0x2e, 0x23, 0x91, + 0x10, 0x2c, 0xa4, 0x5e, 0x9b, 0xa9, 0x38, 0x27, 0xf0, 0x9c, 0xc6, 0x77, 0x13, 0x18, 0x3d, 0x82, + 0x39, 0xc7, 0x13, 0x2c, 0x74, 0x7a, 0x24, 0x64, 0x3c, 0x72, 0x05, 0xcf, 0xa5, 0x94, 0xe6, 0x6c, + 0x0c, 0x63, 0x8d, 0x9a, 0xbf, 0xa4, 0x61, 0xfe, 0x74, 0xcc, 0xaf, 0x60, 0x82, 0x79, 0x6d, 0xbf, + 0xe3, 0x78, 0x3a, 0xea, 0xd9, 0xb5, 0x7f, 0xbf, 0x6d, 0xd4, 0x05, 0xf5, 0x97, 0xad, 0xd8, 0x0b, + 0xee, 0xfb, 0x43, 0x4f, 0x60, 0x9e, 0xd3, 0x5e, 0xe0, 0x32, 0x12, 0x52, 0xc1, 0xc8, 0x01, 0x0b, + 0xc5, 0x91, 0x3a, 0x46, 0x1a, 0xcf, 0x69, 0x01, 0xa6, 0x82, 0x55, 0x25, 0x8c, 0x1e, 0xc0, 0x8c, + 0x4b, 0x3d, 0x3b, 0xa2, 0x36, 0x23, 0x6d, 0xbf, 0xc3, 0xd4, 0x21, 0x26, 0xf1, 0x74, 0x02, 0x96, + 0xfd, 0x0e, 0x93, 0x69, 0xe9, 0xd1, 0x37, 0x84, 0xba, 0x82, 0x85, 0x1e, 0x15, 0xce, 0x6b, 0xc6, + 0x73, 0x63, 0xda, 0x5f, 0x8f, 0xbe, 0x29, 0x0d, 0xc0, 0x52, 0x35, 0x08, 0xfd, 0x2e, 0xf5, 0x1c, + 0x71, 0x48, 0xba, 0x8e, 0x14, 0xe5, 0xd2, 0x3a, 0x83, 0x7d, 0x7c, 0x43, 0xc1, 0x68, 0x17, 0xe6, + 0xf4, 0x21, 0xf5, 0x95, 0x78, 0x23, 0x78, 0x6e, 0x7c, 0x29, 0xb5, 0x3c, 0xb5, 0xf6, 0xb7, 0xcb, + 0x2e, 0x9e, 0x02, 0xca, 
0xda, 0x08, 0xcf, 0xf2, 0xc1, 0x25, 0x47, 0xff, 0x84, 0x1c, 0xf3, 0xe8, + 0xbe, 0xcb, 0xc8, 0x7b, 0x7e, 0xd8, 0x21, 0x92, 0x7d, 0x88, 0xdf, 0xed, 0x72, 0x26, 0x78, 0x6e, + 0x42, 0x45, 0x72, 0x4b, 0xcb, 0xf7, 0xfc, 0xb0, 0xd3, 0x72, 0x7a, 0x6c, 0x5b, 0x0b, 0xd1, 0x4d, + 0x48, 0xf7, 0xfc, 0x0e, 0x73, 0x73, 0x33, 0x2a, 0x05, 0x7a, 0x81, 0xee, 0xc3, 0x74, 0xc4, 0x19, + 0x61, 0xde, 0x81, 0xfc, 0xed, 0x9d, 0xdc, 0xac, 0x72, 0x31, 0x15, 0x71, 0x66, 0xc5, 0x10, 0x7a, + 0x1f, 0x62, 0x42, 0x26, 0x1d, 0x2a, 0x28, 0x69, 0xfb, 0xae, 0xcb, 0xda, 0xf2, 0x3f, 0x11, 0x3f, + 0x10, 0xc4, 0xf1, 0x72, 0x70, 0xa5, 0x7a, 0xda, 0x54, 0xd2, 0x0a, 0x15, 0xb4, 0xdc, 0xf7, 0x10, + 0xdf, 0xcf, 0x98, 0x8e, 0x87, 0x65, 0xdb, 0x81, 0xa8, 0x79, 0xe6, 0x47, 0x06, 0xcc, 0x0c, 0x5d, + 0x04, 0x94, 0x83, 0x9b, 0x56, 0xa3, 0xbc, 0x5d, 0xa9, 0x35, 0x36, 0xc9, 0x6e, 0xa3, 0xb9, 0x63, + 0x95, 0x6b, 0x1b, 0x35, 0xab, 0x92, 0x1d, 0x41, 0xd3, 0x30, 0x51, 0xaf, 0x35, 0xac, 0x12, 0x5e, + 0x7d, 0x9a, 0x35, 0xd0, 0x04, 0x8c, 0x6d, 0xd4, 0x4b, 0xe5, 0xec, 0x28, 0x9a, 0x84, 0xf4, 0xd6, + 0x6e, 0xbd, 0xb4, 0x97, 0x4d, 0xa1, 0x0c, 0xa4, 0x4a, 0x5b, 0x38, 0x3b, 0x86, 0x00, 0xc6, 0x4b, + 0x5b, 0x98, 0xec, 0xad, 0x67, 0xd3, 0xd2, 0x6e, 0x7b, 0x73, 0x93, 0x6c, 0xef, 0xec, 0x36, 0xb3, + 0xe3, 0x28, 0x0f, 0x7f, 0x6a, 0xee, 0x58, 0xd6, 0x7f, 0xc9, 0x5e, 0xad, 0x55, 0x25, 0x55, 0xab, + 0x54, 0xb1, 0x30, 0x59, 0xff, 0x5f, 0xcb, 0xca, 0x66, 0xcc, 0xef, 0x0d, 0xc8, 0x9f, 0x7f, 0x0e, + 0xf4, 0x81, 0x01, 0xb7, 0x5c, 0xdf, 0xb6, 0x63, 0xae, 0xe1, 0xcc, 0x13, 0x44, 0xb6, 0x00, 0x16, + 0xd7, 0xc0, 0xd6, 0xb5, 0x53, 0x54, 0xa8, 0x6b, 0xb7, 0x65, 0xed, 0xb5, 0x29, 0x9d, 0xe2, 0x1b, + 0xee, 0x69, 0xd0, 0x5c, 0x81, 0x1b, 0x67, 0xe8, 0xa2, 0x29, 0xc8, 0x58, 0x8d, 0xd2, 0x7a, 0x3d, + 0x49, 0x54, 0xa5, 0xd6, 0xd4, 0x2b, 0xc3, 0x7c, 0x0c, 0x33, 0x43, 0x57, 0x0e, 0xe5, 0x20, 0x13, + 0x1c, 0x84, 0x94, 0x33, 0x9e, 0x33, 0x96, 0x52, 0xcb, 0x93, 0x38, 0x59, 0x9a, 0xb8, 0xdf, 0xc1, + 0xfa, 0x34, 0x8c, 0xf2, 0x90, 0x49, 0x38, 0xcf, 0x88, 0x39, 
0x2f, 0x01, 0x10, 0x82, 0x54, 0x14, + 0x3a, 0xaa, 0x38, 0x27, 0xab, 0x23, 0x58, 0x2e, 0xd6, 0x67, 0x41, 0x53, 0x22, 0xe1, 0x7e, 0x14, + 0xb6, 0x99, 0xc9, 0xfa, 0xfc, 0x21, 0x59, 0x9b, 0x07, 0x32, 0x6a, 0xb4, 0x03, 0x99, 0x84, 0x76, + 0x46, 0x55, 0xd1, 0x3c, 0xbd, 0x52, 0xd1, 0x0c, 0x04, 0xa7, 0xf9, 0x09, 0x27, 0x6e, 0xcc, 0x00, + 0x16, 0xcf, 0x6e, 0x6b, 0xef, 0x6c, 0xc7, 0xef, 0x8c, 0xb3, 0xb7, 0xdc, 0x62, 0x82, 0xca, 0x4a, + 0x8a, 0xc9, 0xc4, 0x0e, 0x19, 0xe7, 0x24, 0x60, 0x61, 0x3b, 0x49, 0x61, 0x5a, 0x91, 0x89, 0xc2, + 0x77, 0x34, 0x8c, 0x9e, 0x03, 0x70, 0x41, 0x43, 0xa1, 0xea, 0x3d, 0x6e, 0x98, 0xf9, 0x24, 0xc0, + 0x64, 0x14, 0x29, 0xb4, 0x92, 0x51, 0x04, 0x4f, 0x2a, 0x6d, 0xb9, 0x46, 0x15, 0xc8, 0xba, 0x94, + 0x0b, 0x12, 0x05, 0x1d, 0x49, 0x97, 0xca, 0x41, 0xea, 0x52, 0x07, 0xb3, 0xd2, 0x66, 0x57, 0x99, + 0x48, 0xd0, 0xfc, 0x69, 0xf4, 0x74, 0x8f, 0x1a, 0xc8, 0xde, 0x32, 0xa4, 0x59, 0x18, 0xfa, 0x61, + 0xdc, 0xa2, 0x50, 0xe2, 0x39, 0x0c, 0xda, 0x85, 0xa6, 0x1a, 0x82, 0xb0, 0x56, 0x40, 0xcd, 0x93, + 0x79, 0xbe, 0x4e, 0x1f, 0x3e, 0x91, 0x6a, 0x14, 0xc1, 0x7c, 0xcc, 0xb5, 0xec, 0xb5, 0x2c, 0x3a, + 0x71, 0x18, 0x30, 0x45, 0xe1, 0xb3, 0x6b, 0xb5, 0xb7, 0x74, 0x7f, 0x7c, 0xa8, 0xf8, 0x0f, 0x5b, + 0xd2, 0x63, 0xeb, 0x30, 0x60, 0x38, 0xe6, 0xf3, 0x3e, 0x60, 0xd6, 0x61, 0xee, 0x84, 0x0e, 0x5a, + 0x84, 0x9c, 0x64, 0x8f, 0x72, 0x95, 0x58, 0xff, 0xb1, 0x1a, 0xad, 0x13, 0x0c, 0x75, 0x07, 0x6e, + 0x5b, 0x8d, 0x0a, 0xd9, 0xde, 0x20, 0xcd, 0x5a, 0x63, 0xb3, 0x6e, 0x91, 0xdd, 0x56, 0xcb, 0xc2, + 0xa5, 0x46, 0xd9, 0xca, 0x1a, 0xe6, 0xd7, 0xe7, 0x8c, 0x01, 0xfa, 0xb0, 0x88, 0xc0, 0xf4, 0x50, + 0x87, 0x32, 0x54, 0xf6, 0x5e, 0xbe, 0xed, 0x2d, 0x1d, 0x68, 0x67, 0x78, 0xc8, 0x21, 0x5a, 0x80, + 0x09, 0x87, 0x93, 0xae, 0xe3, 0x51, 0x37, 0x9e, 0x0a, 0x32, 0x0e, 0xdf, 0x90, 0x4b, 0xb4, 0x08, + 0xf2, 0x42, 0xed, 0x3b, 0xae, 0x23, 0x0e, 0xd5, 0xe5, 0x19, 0xc5, 0xc7, 0x80, 0x79, 0x04, 0xb7, + 0xcf, 0x29, 0x86, 0x77, 0x1e, 0xb4, 0xf9, 0xb9, 0x01, 0x8b, 0x17, 0xa9, 0xa3, 0x7b, 0x00, 0x22, + 
0xa4, 0x1e, 0x6f, 0x87, 0x4e, 0xa0, 0xcb, 0x6b, 0x12, 0x0f, 0x20, 0x52, 0xae, 0xa6, 0xa3, 0x0e, + 0x4b, 0xa6, 0xa1, 0x51, 0x3c, 0x80, 0xa0, 0x7f, 0x41, 0x5a, 0x36, 0x5a, 0x39, 0xfe, 0xc8, 0xd0, + 0x1f, 0x5d, 0x12, 0xba, 0xec, 0xba, 0x35, 0xaf, 0xeb, 0x63, 0x6d, 0x65, 0x7e, 0x62, 0xc0, 0x44, + 0x82, 0xa1, 0x67, 0x43, 0x55, 0xac, 0x4b, 0x65, 0xe1, 0x54, 0x11, 0x56, 0xe2, 0x07, 0xc7, 0x60, + 0x11, 0xff, 0x43, 0xce, 0x53, 0x9d, 0xc1, 0xea, 0xbf, 0xc0, 0x2e, 0xc3, 0x3c, 0xd5, 0xf9, 0x11, + 0x82, 0x31, 0x19, 0x45, 0x3c, 0xf4, 0xa8, 0xef, 0xb5, 0x1f, 0x53, 0x30, 0xae, 0x13, 0x86, 0x3e, + 0x33, 0x60, 0xb2, 0x7f, 0xeb, 0xd1, 0x15, 0xe7, 0xef, 0xfe, 0x68, 0x9d, 0x5f, 0xb9, 0xba, 0x81, + 0x2e, 0x28, 0xf3, 0xe1, 0x87, 0x3f, 0xfc, 0xfc, 0xe9, 0xe8, 0x92, 0x79, 0x67, 0xe0, 0xfd, 0xa7, + 0xcd, 0x5e, 0x84, 0x89, 0xf2, 0x0b, 0xe3, 0x09, 0xfa, 0xd2, 0x80, 0x9b, 0x67, 0x31, 0x27, 0x7a, + 0x71, 0xc9, 0x96, 0x17, 0x3c, 0x5c, 0xf2, 0x77, 0x13, 0xdb, 0x81, 0x97, 0x61, 0x61, 0x3b, 0x79, + 0x19, 0x9a, 0xab, 0x2a, 0xb6, 0xbf, 0x9a, 0x0f, 0x4f, 0xc7, 0x36, 0x60, 0x30, 0x14, 0xe6, 0xc7, + 0x06, 0xa0, 0xd3, 0xf4, 0x81, 0x9e, 0x5d, 0x83, 0x71, 0x74, 0x88, 0xcf, 0xaf, 0xcd, 0x55, 0xcb, + 0xc6, 0x8a, 0xb1, 0x7e, 0x04, 0xf7, 0xdb, 0x7e, 0xef, 0x62, 0x1f, 0xeb, 0x53, 0xfa, 0xe7, 0xef, + 0xc8, 0x5b, 0xb3, 0x63, 0xbc, 0x2a, 0xc7, 0xda, 0xb6, 0x2f, 0x47, 0xe2, 0x82, 0x1f, 0xda, 0x45, + 0x9b, 0x79, 0xea, 0x4e, 0x15, 0xb5, 0x88, 0x06, 0x0e, 0x3f, 0xe7, 0xf1, 0xfe, 0x52, 0x03, 0xbf, + 0x1a, 0xc6, 0xfe, 0xb8, 0x32, 0xf9, 0xfb, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x88, 0x22, 0xc3, + 0x54, 0xee, 0x0f, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go index aefcc3d4e..8dd0f8e98 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go +++ 
b/vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go @@ -116,6 +116,15 @@ func (AudioEncoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, // The top-level message sent by the client for the `ListVoices` method. type ListVoicesRequest struct { + // Optional (but recommended) + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If + // specified, the ListVoices call will only return voices that can be used to + // synthesize this language_code. E.g. when specifying "en-NZ", you will get + // supported "en-*" voices; when specifying "no", you will get supported + // "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh" + // will also get supported "cmn-*" voices; specifying "zh-hk" will also get + // supported "yue-*" voices. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` } func (m *ListVoicesRequest) Reset() { *m = ListVoicesRequest{} } @@ -123,6 +132,13 @@ func (m *ListVoicesRequest) String() string { return proto.CompactTex func (*ListVoicesRequest) ProtoMessage() {} func (*ListVoicesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *ListVoicesRequest) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + // The message returned to the client by the `ListVoices` method. type ListVoicesResponse struct { // The list of voices. 
@@ -615,58 +631,58 @@ var _TextToSpeech_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/texttospeech/v1beta1/cloud_tts.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 844 bytes of a gzipped FileDescriptorProto + // 846 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xee, 0xd8, 0x71, 0xda, 0xbc, 0x5e, 0x3b, 0xce, 0x10, 0x51, 0x13, 0x51, 0xe4, 0x6e, 0xa8, - 0x64, 0xe5, 0x60, 0x63, 0x97, 0x0f, 0x91, 0x1e, 0xc0, 0xb1, 0xb7, 0xae, 0x25, 0x7f, 0x31, 0x9b, - 0xa4, 0x12, 0x97, 0xd5, 0x64, 0x3d, 0x6c, 0x56, 0xac, 0x67, 0x16, 0xcf, 0x6c, 0x54, 0x7a, 0x44, - 0x9c, 0x39, 0xc0, 0x5f, 0xe0, 0x07, 0xf0, 0x5b, 0x40, 0xe2, 0x17, 0xf0, 0x0f, 0xb8, 0x70, 0x44, - 0x33, 0xbb, 0x71, 0x1d, 0x07, 0x51, 0x97, 0x03, 0xb7, 0x9d, 0x67, 0xfc, 0x3c, 0xf3, 0xbe, 0xcf, - 0x3c, 0x7e, 0x07, 0x5a, 0x81, 0x10, 0x41, 0xc4, 0x9a, 0x7e, 0x24, 0x92, 0x59, 0x53, 0xb1, 0x17, - 0x4a, 0x09, 0x19, 0x33, 0xe6, 0x5f, 0x36, 0xaf, 0x5a, 0x17, 0x4c, 0xd1, 0x56, 0xba, 0xe5, 0x29, - 0x25, 0x1b, 0xf1, 0x42, 0x28, 0x81, 0x1f, 0xa6, 0x94, 0x86, 0xc1, 0x1b, 0xab, 0x94, 0x46, 0x46, - 0x39, 0x78, 0x37, 0x53, 0xa5, 0x71, 0xd8, 0xa4, 0x9c, 0x0b, 0x45, 0x55, 0x28, 0x78, 0x26, 0x60, - 0xbf, 0x05, 0x7b, 0xc3, 0x50, 0xaa, 0x73, 0x11, 0xfa, 0x4c, 0x12, 0xf6, 0x4d, 0xc2, 0xa4, 0xb2, - 0xcf, 0x01, 0xaf, 0x82, 0x32, 0x16, 0x5c, 0x32, 0xfc, 0x39, 0x6c, 0x5f, 0x19, 0xa4, 0x8a, 0x6a, - 0xf9, 0x7a, 0xb1, 0x5d, 0x6f, 0xbc, 0xf6, 0xf0, 0x86, 0x91, 0x20, 0x19, 0xcf, 0xfe, 0x15, 0x41, - 0xc1, 0x20, 0xf8, 0x11, 0x94, 0x23, 0xca, 0x83, 0x84, 0x06, 0xcc, 0xf3, 0xc5, 0x2c, 0xd3, 0xdc, - 0x21, 0xa5, 0x6b, 0xb4, 0xab, 0x41, 0x8c, 0x61, 0x8b, 0xd3, 0x39, 0xab, 0xe6, 0x6a, 0xa8, 0xbe, - 0x43, 0xcc, 0x37, 0x76, 0xa1, 0x28, 0xe5, 0x3c, 0xf2, 0x02, 0xc6, 0x67, 0x6c, 0x51, 0xcd, 0xd7, - 0x50, 0xbd, 0xdc, 0x6e, 0x6f, 0x50, 0x8b, 0x2b, 0xe7, 0x91, 0x39, 0xbd, 0x6f, 0x98, 0x04, 0xb4, - 0x4c, 0xfa, 0x8d, 0x3f, 0x85, 
0x77, 0x38, 0x55, 0xc9, 0x82, 0x46, 0x9e, 0xa4, 0xf3, 0x38, 0x62, - 0xde, 0x82, 0x2a, 0xe6, 0x5d, 0xb2, 0x85, 0x7a, 0x59, 0xdd, 0xaa, 0xa1, 0x7a, 0x81, 0xbc, 0x9d, - 0xfd, 0xc0, 0x35, 0xfb, 0x84, 0x2a, 0xf6, 0x4c, 0xef, 0xda, 0xdf, 0xe7, 0xe0, 0xbe, 0xfb, 0x2d, - 0x57, 0x97, 0x4c, 0x86, 0x2f, 0x99, 0x6b, 0xce, 0xcc, 0x8c, 0xc4, 0x7d, 0x28, 0x84, 0x3c, 0x4e, - 0x54, 0x15, 0xd5, 0x50, 0xbd, 0xd8, 0x6e, 0x6d, 0x52, 0x65, 0x26, 0x25, 0x07, 0x9a, 0x48, 0x52, - 0x3e, 0x1e, 0x41, 0xc1, 0x78, 0x68, 0x9c, 0x28, 0xb6, 0x3f, 0xd9, 0xd4, 0x7a, 0x97, 0x45, 0xcc, - 0xd7, 0xf7, 0x3d, 0xa5, 0x0b, 0x3a, 0x97, 0x24, 0x55, 0xc1, 0x5f, 0x80, 0x45, 0x93, 0x59, 0x28, - 0x3c, 0x5f, 0xf0, 0xaf, 0xc2, 0xc0, 0x98, 0x58, 0x6c, 0x37, 0x36, 0x50, 0xed, 0x68, 0x5a, 0xd7, - 0xb0, 0x48, 0x91, 0xbe, 0x5a, 0xd8, 0x43, 0x28, 0xdf, 0x2c, 0x1d, 0xef, 0xc3, 0x96, 0x96, 0x30, - 0xbd, 0xef, 0x3c, 0xbb, 0x43, 0xcc, 0x4a, 0xa3, 0xda, 0xf7, 0xf4, 0x4a, 0x35, 0xaa, 0x57, 0x27, - 0x65, 0xb0, 0x4c, 0xa3, 0x9e, 0x14, 0xc9, 0xc2, 0x67, 0xf6, 0xcf, 0x08, 0xf6, 0xff, 0xa9, 0x01, - 0x7c, 0x08, 0xa5, 0x1b, 0xc1, 0x49, 0xd5, 0x89, 0xb5, 0x9a, 0x9b, 0xff, 0x2d, 0x36, 0xf6, 0x9f, - 0x08, 0x8a, 0x2b, 0x8e, 0xe0, 0xe7, 0x50, 0x4e, 0x7d, 0x65, 0xdc, 0x17, 0xb3, 0x90, 0x07, 0xa6, - 0xbc, 0x72, 0xfb, 0x83, 0x4d, 0x9d, 0x75, 0x32, 0x1e, 0x29, 0xd1, 0xd5, 0xa5, 0x6e, 0x5b, 0xc6, - 0x8c, 0x7e, 0x1d, 0xf2, 0xc0, 0x24, 0xd3, 0xb4, 0x86, 0x88, 0x75, 0x0d, 0xea, 0x38, 0xe2, 0x7d, - 0x28, 0xc4, 0xa1, 0xf2, 0x2f, 0x4d, 0x73, 0x88, 0xa4, 0x0b, 0xfc, 0x3e, 0x94, 0xaf, 0x44, 0x94, - 0xcc, 0x99, 0x17, 0xd0, 0x90, 0x7b, 0xb3, 0x0b, 0x93, 0x67, 0x44, 0xac, 0x14, 0xed, 0xd3, 0x90, - 0xf7, 0x2e, 0xf0, 0x11, 0xec, 0xdd, 0x0e, 0x7e, 0xc1, 0x04, 0x7f, 0x57, 0xae, 0x25, 0xfe, 0x33, - 0xa8, 0xde, 0x0e, 0x7c, 0x36, 0x24, 0x0e, 0xa1, 0xb4, 0x4c, 0x96, 0x62, 0x3c, 0xbd, 0x7d, 0x8b, - 0x58, 0xd7, 0x51, 0xd1, 0xd8, 0xd1, 0x73, 0xd8, 0x5d, 0x73, 0x15, 0x3f, 0x84, 0x07, 0xae, 0x3b, - 0x1a, 0x7a, 0xe7, 0x93, 0x41, 0xd7, 0xf1, 0xfa, 0xce, 0xb8, 0xe7, 
0x10, 0xef, 0x6c, 0xec, 0x4e, - 0x9d, 0xee, 0xe0, 0xe9, 0xc0, 0xe9, 0x55, 0xee, 0xe0, 0x7b, 0xb0, 0x35, 0xea, 0x0c, 0x9d, 0x0a, - 0xc2, 0x00, 0xdb, 0x4f, 0x1d, 0xf3, 0x9d, 0xc3, 0x45, 0xb8, 0x3b, 0x76, 0xce, 0x4e, 0x49, 0x67, - 0x58, 0xc9, 0x1f, 0x9d, 0x42, 0xe9, 0x86, 0x8d, 0xf8, 0x3d, 0x38, 0xe8, 0x9c, 0xf5, 0x06, 0x13, - 0xcf, 0x19, 0x77, 0x27, 0xbd, 0xc1, 0xb8, 0xbf, 0xa6, 0x69, 0xc1, 0xbd, 0xe1, 0x60, 0xec, 0x74, - 0x48, 0xeb, 0xe3, 0x0a, 0xc2, 0x77, 0x21, 0x3f, 0x9a, 0x3e, 0xae, 0xe4, 0x34, 0x3c, 0xe9, 0xf7, - 0xbd, 0xc9, 0xf4, 0xcc, 0xad, 0xe4, 0xdb, 0xbf, 0xe7, 0xc0, 0x3a, 0x65, 0x2f, 0xd4, 0xa9, 0x48, - 0x9b, 0xc5, 0x3f, 0x22, 0x80, 0x57, 0x03, 0x12, 0x7f, 0xb8, 0xc1, 0xed, 0xde, 0x1a, 0xb2, 0x07, - 0x1f, 0xbd, 0x21, 0x2b, 0x35, 0xd8, 0xbe, 0xff, 0xdd, 0x6f, 0x7f, 0xfc, 0x94, 0xdb, 0xc3, 0xbb, - 0xcb, 0x37, 0x21, 0x1d, 0xae, 0xf8, 0x17, 0x04, 0x95, 0xf5, 0x6b, 0xc1, 0xc7, 0x6f, 0x30, 0x71, - 0xd6, 0x86, 0xd7, 0xc1, 0x93, 0xff, 0xc4, 0xcd, 0xca, 0x3c, 0x34, 0x65, 0x3e, 0xb0, 0xab, 0xcb, - 0x32, 0x35, 0xff, 0x58, 0x2e, 0x7f, 0x7f, 0x8c, 0x8e, 0x4e, 0x7e, 0x40, 0xf0, 0xc8, 0x17, 0xf3, - 0xd7, 0x9f, 0x73, 0xb2, 0xb7, 0xea, 0xff, 0x54, 0xbf, 0x5c, 0x53, 0xf4, 0xe5, 0x28, 0xe3, 0x05, - 0x42, 0xff, 0xfb, 0x1b, 0x62, 0x11, 0x34, 0x03, 0xc6, 0xcd, 0xbb, 0xd6, 0x4c, 0xb7, 0x68, 0x1c, - 0xca, 0x7f, 0x79, 0x4e, 0x9f, 0xac, 0x82, 0x7f, 0x21, 0x74, 0xb1, 0x6d, 0xc8, 0x8f, 0xff, 0x0e, - 0x00, 0x00, 0xff, 0xff, 0x7f, 0xac, 0x4e, 0x87, 0x8a, 0x07, 0x00, 0x00, + 0x18, 0xee, 0xd8, 0x71, 0xda, 0xbe, 0x5e, 0x3b, 0xce, 0x28, 0xa2, 0x26, 0xa2, 0x28, 0xdd, 0x50, + 0xc9, 0xca, 0xc1, 0xc6, 0x2e, 0x9f, 0xe9, 0x01, 0x1c, 0x7b, 0xeb, 0x5a, 0xf2, 0x17, 0xb3, 0x49, + 0x2a, 0x71, 0x59, 0x4d, 0xd6, 0xc3, 0x66, 0xc5, 0x7a, 0x66, 0xf1, 0x8c, 0xa3, 0xd2, 0x23, 0xe2, + 0xcc, 0x01, 0xfe, 0x02, 0x3f, 0x80, 0xdf, 0x02, 0x12, 0xbf, 0x80, 0x7f, 0xc0, 0x85, 0x23, 0x9a, + 0x99, 0x4d, 0xea, 0x38, 0x88, 0x3a, 0x1c, 0xb8, 0xed, 0x3c, 0xe3, 0xe7, 0x9d, 0xe7, 0x7d, 0xe6, + 0xf1, 0x3b, 0xd0, 0x8c, 0x84, 
0x88, 0x12, 0xd6, 0x08, 0x13, 0xb1, 0x98, 0x36, 0x14, 0x7b, 0xa9, + 0x94, 0x90, 0x29, 0x63, 0xe1, 0x79, 0xe3, 0xa2, 0x79, 0xc6, 0x14, 0x6d, 0xda, 0xad, 0x40, 0x29, + 0x59, 0x4f, 0xe7, 0x42, 0x09, 0xfc, 0xc8, 0x52, 0xea, 0x06, 0xaf, 0x2f, 0x53, 0xea, 0x19, 0x65, + 0xf7, 0x9d, 0xac, 0x2a, 0x4d, 0xe3, 0x06, 0xe5, 0x5c, 0x28, 0xaa, 0x62, 0xc1, 0xb3, 0x02, 0xee, + 0x27, 0xb0, 0x3d, 0x88, 0xa5, 0x3a, 0x15, 0x71, 0xc8, 0x24, 0x61, 0xdf, 0x2c, 0x98, 0x54, 0x78, + 0x1f, 0x4a, 0x09, 0xe5, 0xd1, 0x82, 0x46, 0x2c, 0x08, 0xc5, 0x94, 0x55, 0xd1, 0x1e, 0xaa, 0xdd, + 0x27, 0xce, 0x25, 0xd8, 0x11, 0x53, 0xe6, 0x9e, 0x02, 0x5e, 0x66, 0xca, 0x54, 0x70, 0xc9, 0xf0, + 0xe7, 0xb0, 0x79, 0x61, 0x90, 0x2a, 0xda, 0xcb, 0xd7, 0x8a, 0xad, 0x5a, 0xfd, 0x8d, 0x0a, 0xeb, + 0xa6, 0x04, 0xc9, 0x78, 0xee, 0xaf, 0x08, 0x0a, 0x06, 0xc1, 0x8f, 0xa1, 0x7c, 0x4d, 0x86, 0xad, + 0x79, 0x9f, 0x94, 0x96, 0x75, 0x48, 0x8c, 0x61, 0x83, 0xd3, 0x19, 0xab, 0xe6, 0x8c, 0x48, 0xf3, + 0x8d, 0x7d, 0x28, 0x4a, 0x39, 0x4b, 0x82, 0x88, 0xf1, 0x29, 0x9b, 0x57, 0xf3, 0x7b, 0xa8, 0x56, + 0x6e, 0xb5, 0xd6, 0xd0, 0xe2, 0xcb, 0x59, 0x62, 0x4e, 0xef, 0x19, 0x26, 0x01, 0x5d, 0xc6, 0x7e, + 0xe3, 0x4f, 0xe1, 0x6d, 0x4e, 0xd5, 0x62, 0x4e, 0x93, 0x40, 0xd2, 0x59, 0x9a, 0xb0, 0x60, 0x4e, + 0x15, 0x0b, 0xce, 0xd9, 0x5c, 0xbd, 0xaa, 0x6e, 0xec, 0xa1, 0x5a, 0x81, 0xbc, 0x95, 0xfd, 0xc0, + 0x37, 0xfb, 0x84, 0x2a, 0xf6, 0x5c, 0xef, 0xba, 0xdf, 0xe7, 0xe0, 0x81, 0xff, 0x2d, 0x57, 0xe7, + 0x4c, 0xc6, 0xaf, 0x98, 0x6f, 0xce, 0xbc, 0x74, 0xbb, 0x07, 0x85, 0x98, 0xa7, 0x0b, 0x65, 0x5c, + 0x2e, 0xb6, 0x9a, 0xeb, 0xa8, 0xcc, 0x4a, 0xc9, 0xbe, 0x26, 0x12, 0xcb, 0xc7, 0x43, 0x28, 0x18, + 0x0f, 0x8d, 0x13, 0xc5, 0xd6, 0xc7, 0xeb, 0x5a, 0xef, 0xb3, 0x84, 0x85, 0x3a, 0x14, 0x13, 0x3a, + 0xa7, 0x33, 0x49, 0x6c, 0x15, 0xfc, 0x05, 0x38, 0x74, 0x31, 0x8d, 0x45, 0x10, 0x0a, 0xfe, 0x55, + 0x1c, 0x19, 0x13, 0x8b, 0xad, 0xfa, 0x1a, 0x55, 0xdb, 0x9a, 0xd6, 0x31, 0x2c, 0x52, 0xa4, 0xaf, + 0x17, 0xee, 0x00, 0xca, 0xd7, 0xa5, 0xe3, 0x1d, 0xd8, 0xd0, 0x25, 
0x6c, 0xc2, 0x9e, 0xdf, 0x21, + 0x66, 0xa5, 0x51, 0xed, 0xbb, 0xbd, 0x52, 0x8d, 0xea, 0xd5, 0x51, 0x19, 0x1c, 0xd3, 0x68, 0x20, + 0xc5, 0x62, 0x1e, 0x32, 0xf7, 0x67, 0x04, 0x3b, 0xff, 0xd4, 0xc0, 0x5a, 0xf9, 0xfd, 0xdf, 0x62, + 0xe3, 0xfe, 0x89, 0xa0, 0xb8, 0xe4, 0x08, 0x7e, 0x01, 0x65, 0xeb, 0x2b, 0xe3, 0xa1, 0x98, 0xc6, + 0x3c, 0x32, 0xf2, 0xca, 0xad, 0xf7, 0xd7, 0x75, 0xd6, 0xcb, 0x78, 0xa4, 0x44, 0x97, 0x97, 0xba, + 0x6d, 0x99, 0x32, 0xfa, 0x75, 0xcc, 0x23, 0x93, 0x4c, 0xd3, 0x1a, 0x22, 0xce, 0x25, 0xa8, 0xe3, + 0x88, 0x77, 0xa0, 0x90, 0xc6, 0x2a, 0x3c, 0x37, 0xcd, 0x21, 0x62, 0x17, 0xf8, 0x3d, 0x28, 0x5f, + 0x88, 0x64, 0x31, 0x63, 0x41, 0x44, 0x63, 0x1e, 0x4c, 0xcf, 0x4c, 0x9e, 0x11, 0x71, 0x2c, 0xda, + 0xa3, 0x31, 0xef, 0x9e, 0xe1, 0x03, 0xd8, 0xbe, 0x19, 0xfc, 0x82, 0x09, 0xfe, 0x96, 0x5c, 0x49, + 0xfc, 0x67, 0x50, 0xbd, 0x19, 0xf8, 0x6c, 0x48, 0xec, 0x43, 0xe9, 0x2a, 0x59, 0x8a, 0x71, 0x7b, + 0xfb, 0x0e, 0x71, 0x2e, 0xa3, 0xa2, 0xb1, 0x83, 0x17, 0xb0, 0xb5, 0xe2, 0x2a, 0x7e, 0x04, 0x0f, + 0x7d, 0x7f, 0x38, 0x08, 0x4e, 0xc7, 0xfd, 0x8e, 0x17, 0xf4, 0xbc, 0x51, 0xd7, 0x23, 0xc1, 0xc9, + 0xc8, 0x9f, 0x78, 0x9d, 0xfe, 0xb3, 0xbe, 0xd7, 0xad, 0xdc, 0xc1, 0xf7, 0x60, 0x63, 0xd8, 0x1e, + 0x78, 0x15, 0x84, 0x01, 0x36, 0x9f, 0x79, 0xe6, 0x3b, 0x87, 0x8b, 0x70, 0x77, 0xe4, 0x9d, 0x1c, + 0x93, 0xf6, 0xa0, 0x92, 0x3f, 0x38, 0x86, 0xd2, 0x35, 0x1b, 0xf1, 0xbb, 0xb0, 0xdb, 0x3e, 0xe9, + 0xf6, 0xc7, 0x81, 0x37, 0xea, 0x8c, 0xbb, 0xfd, 0x51, 0x6f, 0xa5, 0xa6, 0x03, 0xf7, 0x06, 0xfd, + 0x91, 0xd7, 0x26, 0xcd, 0x8f, 0x2a, 0x08, 0xdf, 0x85, 0xfc, 0x70, 0xf2, 0xa4, 0x92, 0xd3, 0xf0, + 0xb8, 0xd7, 0x0b, 0xc6, 0x93, 0x13, 0xbf, 0x92, 0x6f, 0xfd, 0x9e, 0x03, 0xe7, 0x98, 0xbd, 0x54, + 0xc7, 0xc2, 0x36, 0x8b, 0x7f, 0x44, 0x00, 0xaf, 0x07, 0x24, 0xfe, 0x60, 0x8d, 0xdb, 0xbd, 0x31, + 0x89, 0x77, 0x3f, 0xbc, 0x25, 0xcb, 0x1a, 0xec, 0x3e, 0xf8, 0xee, 0xb7, 0x3f, 0x7e, 0xca, 0x6d, + 0xe3, 0xad, 0xab, 0x87, 0xc3, 0x0e, 0x57, 0xfc, 0x0b, 0x82, 0xca, 0xea, 0xb5, 0xe0, 0xc3, 0x5b, + 0x4c, 
0x9c, 0x95, 0xe1, 0xb5, 0xfb, 0xf4, 0x3f, 0x71, 0x33, 0x99, 0xfb, 0x46, 0xe6, 0x43, 0xb7, + 0x7a, 0x25, 0x53, 0xf3, 0x0f, 0xe5, 0xd5, 0xef, 0x0f, 0xd1, 0xc1, 0xd1, 0x0f, 0x08, 0x1e, 0x87, + 0x62, 0xf6, 0xe6, 0x73, 0x8e, 0xb6, 0x97, 0xfd, 0x9f, 0xe8, 0xe7, 0x6d, 0x82, 0xbe, 0x1c, 0x66, + 0xbc, 0x48, 0xe8, 0x7f, 0x7f, 0x5d, 0xcc, 0xa3, 0x46, 0xc4, 0xb8, 0x79, 0xfc, 0x1a, 0x76, 0x8b, + 0xa6, 0xb1, 0xfc, 0x97, 0x37, 0xf7, 0xe9, 0x32, 0xf8, 0x17, 0x42, 0x67, 0x9b, 0x86, 0xfc, 0xe4, + 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x09, 0xb8, 0x07, 0xd3, 0xaf, 0x07, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1/video_intelligence.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1/video_intelligence.pb.go new file mode 100644 index 000000000..7da3439c7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1/video_intelligence.pb.go @@ -0,0 +1,1551 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/videointelligence/v1p1beta1/video_intelligence.proto + +/* +Package videointelligence is a generated protocol buffer package. 
+ +It is generated from these files: + google/cloud/videointelligence/v1p1beta1/video_intelligence.proto + +It has these top-level messages: + AnnotateVideoRequest + VideoContext + LabelDetectionConfig + ShotChangeDetectionConfig + ExplicitContentDetectionConfig + FaceConfig + VideoSegment + LabelSegment + LabelFrame + Entity + LabelAnnotation + ExplicitContentFrame + ExplicitContentAnnotation + NormalizedBoundingBox + FaceSegment + FaceDetectionFrame + FaceDetectionAttribute + EmotionAttribute + FaceDetectionAnnotation + VideoAnnotationResults + AnnotateVideoResponse + VideoAnnotationProgress + AnnotateVideoProgress + SpeechTranscriptionConfig + SpeechContext + SpeechTranscription + SpeechRecognitionAlternative + WordInfo +*/ +package videointelligence + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Video annotation feature. +type Feature int32 + +const ( + // Unspecified. + Feature_FEATURE_UNSPECIFIED Feature = 0 + // Label detection. Detect objects, such as dog or flower. 
+ Feature_LABEL_DETECTION Feature = 1 + // Shot change detection. + Feature_SHOT_CHANGE_DETECTION Feature = 2 + // Explicit content detection. + Feature_EXPLICIT_CONTENT_DETECTION Feature = 3 + // Face detection. + Feature_FACE_DETECTION Feature = 8 + // Speech transcription. + Feature_SPEECH_TRANSCRIPTION Feature = 6 +) + +var Feature_name = map[int32]string{ + 0: "FEATURE_UNSPECIFIED", + 1: "LABEL_DETECTION", + 2: "SHOT_CHANGE_DETECTION", + 3: "EXPLICIT_CONTENT_DETECTION", + 8: "FACE_DETECTION", + 6: "SPEECH_TRANSCRIPTION", +} +var Feature_value = map[string]int32{ + "FEATURE_UNSPECIFIED": 0, + "LABEL_DETECTION": 1, + "SHOT_CHANGE_DETECTION": 2, + "EXPLICIT_CONTENT_DETECTION": 3, + "FACE_DETECTION": 8, + "SPEECH_TRANSCRIPTION": 6, +} + +func (x Feature) String() string { + return proto.EnumName(Feature_name, int32(x)) +} +func (Feature) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Label detection mode. +type LabelDetectionMode int32 + +const ( + // Unspecified. + LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0 + // Detect shot-level labels. + LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1 + // Detect frame-level labels. + LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2 + // Detect both shot-level and frame-level labels. + LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3 +) + +var LabelDetectionMode_name = map[int32]string{ + 0: "LABEL_DETECTION_MODE_UNSPECIFIED", + 1: "SHOT_MODE", + 2: "FRAME_MODE", + 3: "SHOT_AND_FRAME_MODE", +} +var LabelDetectionMode_value = map[string]int32{ + "LABEL_DETECTION_MODE_UNSPECIFIED": 0, + "SHOT_MODE": 1, + "FRAME_MODE": 2, + "SHOT_AND_FRAME_MODE": 3, +} + +func (x LabelDetectionMode) String() string { + return proto.EnumName(LabelDetectionMode_name, int32(x)) +} +func (LabelDetectionMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// Bucketized representation of likelihood. 
+type Likelihood int32 + +const ( + // Unspecified likelihood. + Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0 + // Very unlikely. + Likelihood_VERY_UNLIKELY Likelihood = 1 + // Unlikely. + Likelihood_UNLIKELY Likelihood = 2 + // Possible. + Likelihood_POSSIBLE Likelihood = 3 + // Likely. + Likelihood_LIKELY Likelihood = 4 + // Very likely. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "LIKELIHOOD_UNSPECIFIED", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "LIKELIHOOD_UNSPECIFIED": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// Emotions. +type Emotion int32 + +const ( + // Unspecified emotion. + Emotion_EMOTION_UNSPECIFIED Emotion = 0 + // Amusement. + Emotion_AMUSEMENT Emotion = 1 + // Anger. + Emotion_ANGER Emotion = 2 + // Concentration. + Emotion_CONCENTRATION Emotion = 3 + // Contentment. + Emotion_CONTENTMENT Emotion = 4 + // Desire. + Emotion_DESIRE Emotion = 5 + // Disappointment. + Emotion_DISAPPOINTMENT Emotion = 6 + // Disgust. + Emotion_DISGUST Emotion = 7 + // Elation. + Emotion_ELATION Emotion = 8 + // Embarrassment. + Emotion_EMBARRASSMENT Emotion = 9 + // Interest. + Emotion_INTEREST Emotion = 10 + // Pride. + Emotion_PRIDE Emotion = 11 + // Sadness. + Emotion_SADNESS Emotion = 12 + // Surprise. 
+ Emotion_SURPRISE Emotion = 13 +) + +var Emotion_name = map[int32]string{ + 0: "EMOTION_UNSPECIFIED", + 1: "AMUSEMENT", + 2: "ANGER", + 3: "CONCENTRATION", + 4: "CONTENTMENT", + 5: "DESIRE", + 6: "DISAPPOINTMENT", + 7: "DISGUST", + 8: "ELATION", + 9: "EMBARRASSMENT", + 10: "INTEREST", + 11: "PRIDE", + 12: "SADNESS", + 13: "SURPRISE", +} +var Emotion_value = map[string]int32{ + "EMOTION_UNSPECIFIED": 0, + "AMUSEMENT": 1, + "ANGER": 2, + "CONCENTRATION": 3, + "CONTENTMENT": 4, + "DESIRE": 5, + "DISAPPOINTMENT": 6, + "DISGUST": 7, + "ELATION": 8, + "EMBARRASSMENT": 9, + "INTEREST": 10, + "PRIDE": 11, + "SADNESS": 12, + "SURPRISE": 13, +} + +func (x Emotion) String() string { + return proto.EnumName(Emotion_name, int32(x)) +} +func (Emotion) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// Video annotation request. +type AnnotateVideoRequest struct { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"` + // Requested video annotation features. 
+ Features []Feature `protobuf:"varint,2,rep,packed,name=features,enum=google.cloud.videointelligence.v1p1beta1.Feature" json:"features,omitempty"` + // Additional video context and/or feature-specific parameters. + VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext" json:"video_context,omitempty"` + // Optional location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` + // Optional cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. 
+ LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId" json:"location_id,omitempty"` +} + +func (m *AnnotateVideoRequest) Reset() { *m = AnnotateVideoRequest{} } +func (m *AnnotateVideoRequest) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoRequest) ProtoMessage() {} +func (*AnnotateVideoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *AnnotateVideoRequest) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *AnnotateVideoRequest) GetInputContent() []byte { + if m != nil { + return m.InputContent + } + return nil +} + +func (m *AnnotateVideoRequest) GetFeatures() []Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext { + if m != nil { + return m.VideoContext + } + return nil +} + +func (m *AnnotateVideoRequest) GetOutputUri() string { + if m != nil { + return m.OutputUri + } + return "" +} + +func (m *AnnotateVideoRequest) GetLocationId() string { + if m != nil { + return m.LocationId + } + return "" +} + +// Video context and/or feature-specific parameters. +type VideoContext struct { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video + // is treated as a single segment. + Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty"` + // Config for LABEL_DETECTION. + LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig" json:"label_detection_config,omitempty"` + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig" json:"shot_change_detection_config,omitempty"` + // Config for EXPLICIT_CONTENT_DETECTION. 
+ ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig" json:"explicit_content_detection_config,omitempty"` + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig *SpeechTranscriptionConfig `protobuf:"bytes,6,opt,name=speech_transcription_config,json=speechTranscriptionConfig" json:"speech_transcription_config,omitempty"` + // Config for FACE_DETECTION. + FaceDetectionConfig *FaceConfig `protobuf:"bytes,7,opt,name=face_detection_config,json=faceDetectionConfig" json:"face_detection_config,omitempty"` +} + +func (m *VideoContext) Reset() { *m = VideoContext{} } +func (m *VideoContext) String() string { return proto.CompactTextString(m) } +func (*VideoContext) ProtoMessage() {} +func (*VideoContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *VideoContext) GetSegments() []*VideoSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig { + if m != nil { + return m.LabelDetectionConfig + } + return nil +} + +func (m *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig { + if m != nil { + return m.ShotChangeDetectionConfig + } + return nil +} + +func (m *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig { + if m != nil { + return m.ExplicitContentDetectionConfig + } + return nil +} + +func (m *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig { + if m != nil { + return m.SpeechTranscriptionConfig + } + return nil +} + +func (m *VideoContext) GetFaceDetectionConfig() *FaceConfig { + if m != nil { + return m.FaceDetectionConfig + } + return nil +} + +// Config for LABEL_DETECTION. +type LabelDetectionConfig struct { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. 
+ // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,enum=google.cloud.videointelligence.v1p1beta1.LabelDetectionMode" json:"label_detection_mode,omitempty"` + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera" json:"stationary_camera,omitempty"` + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,3,opt,name=model" json:"model,omitempty"` +} + +func (m *LabelDetectionConfig) Reset() { *m = LabelDetectionConfig{} } +func (m *LabelDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*LabelDetectionConfig) ProtoMessage() {} +func (*LabelDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode { + if m != nil { + return m.LabelDetectionMode + } + return LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED +} + +func (m *LabelDetectionConfig) GetStationaryCamera() bool { + if m != nil { + return m.StationaryCamera + } + return false +} + +func (m *LabelDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// Config for SHOT_CHANGE_DETECTION. +type ShotChangeDetectionConfig struct { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". 
+ Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` +} + +func (m *ShotChangeDetectionConfig) Reset() { *m = ShotChangeDetectionConfig{} } +func (m *ShotChangeDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*ShotChangeDetectionConfig) ProtoMessage() {} +func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ShotChangeDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// Config for EXPLICIT_CONTENT_DETECTION. +type ExplicitContentDetectionConfig struct { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` +} + +func (m *ExplicitContentDetectionConfig) Reset() { *m = ExplicitContentDetectionConfig{} } +func (m *ExplicitContentDetectionConfig) String() string { return proto.CompactTextString(m) } +func (*ExplicitContentDetectionConfig) ProtoMessage() {} +func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ExplicitContentDetectionConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +// Config for FACE_DETECTION. +type FaceConfig struct { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` + // Whether bounding boxes be included in the face annotation output. + IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes" json:"include_bounding_boxes,omitempty"` + // Whether to enable emotion detection. Ignored if 'include_bounding_boxes' is + // false. 
+ IncludeEmotions bool `protobuf:"varint,4,opt,name=include_emotions,json=includeEmotions" json:"include_emotions,omitempty"` +} + +func (m *FaceConfig) Reset() { *m = FaceConfig{} } +func (m *FaceConfig) String() string { return proto.CompactTextString(m) } +func (*FaceConfig) ProtoMessage() {} +func (*FaceConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *FaceConfig) GetModel() string { + if m != nil { + return m.Model + } + return "" +} + +func (m *FaceConfig) GetIncludeBoundingBoxes() bool { + if m != nil { + return m.IncludeBoundingBoxes + } + return false +} + +func (m *FaceConfig) GetIncludeEmotions() bool { + if m != nil { + return m.IncludeEmotions + } + return false +} + +// Video segment. +type VideoSegment struct { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + StartTimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset" json:"start_time_offset,omitempty"` + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + EndTimeOffset *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset" json:"end_time_offset,omitempty"` +} + +func (m *VideoSegment) Reset() { *m = VideoSegment{} } +func (m *VideoSegment) String() string { return proto.CompactTextString(m) } +func (*VideoSegment) ProtoMessage() {} +func (*VideoSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *VideoSegment) GetStartTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.StartTimeOffset + } + return nil +} + +func (m *VideoSegment) GetEndTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.EndTimeOffset + } + return nil +} + +// Video segment level annotation results for label detection. +type LabelSegment struct { + // Video segment where a label was detected. 
+ Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` + // Confidence that the label is accurate. Range: [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *LabelSegment) Reset() { *m = LabelSegment{} } +func (m *LabelSegment) String() string { return proto.CompactTextString(m) } +func (*LabelSegment) ProtoMessage() {} +func (*LabelSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *LabelSegment) GetSegment() *VideoSegment { + if m != nil { + return m.Segment + } + return nil +} + +func (m *LabelSegment) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Video frame level annotation results for label detection. +type LabelFrame struct { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + TimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` + // Confidence that the label is accurate. Range: [0, 1]. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` +} + +func (m *LabelFrame) Reset() { *m = LabelFrame{} } +func (m *LabelFrame) String() string { return proto.CompactTextString(m) } +func (*LabelFrame) ProtoMessage() {} +func (*LabelFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *LabelFrame) GetTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.TimeOffset + } + return nil +} + +func (m *LabelFrame) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +// Detected entity from video analysis. +type Entity struct { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). 
+ EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` + // Textual description, e.g. `Fixed-gear bicycle`. + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + // Language code for `description` in BCP-47 format. + LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` +} + +func (m *Entity) Reset() { *m = Entity{} } +func (m *Entity) String() string { return proto.CompactTextString(m) } +func (*Entity) ProtoMessage() {} +func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Entity) GetEntityId() string { + if m != nil { + return m.EntityId + } + return "" +} + +func (m *Entity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Entity) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +// Label annotation. +type LabelAnnotation struct { + // Detected entity. + Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` + // Common categories for the detected entity. + // E.g. when the label is `Terrier` the category is likely `dog`. And in some + // cases there might be more than one categories e.g. `Terrier` could also be + // a `pet`. + CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities" json:"category_entities,omitempty"` + // All video segments where a label was detected. + Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments" json:"segments,omitempty"` + // All video frames where a label was detected. 
+ Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames" json:"frames,omitempty"` +} + +func (m *LabelAnnotation) Reset() { *m = LabelAnnotation{} } +func (m *LabelAnnotation) String() string { return proto.CompactTextString(m) } +func (*LabelAnnotation) ProtoMessage() {} +func (*LabelAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *LabelAnnotation) GetEntity() *Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *LabelAnnotation) GetCategoryEntities() []*Entity { + if m != nil { + return m.CategoryEntities + } + return nil +} + +func (m *LabelAnnotation) GetSegments() []*LabelSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *LabelAnnotation) GetFrames() []*LabelFrame { + if m != nil { + return m.Frames + } + return nil +} + +// Video frame level annotation results for explicit content. +type ExplicitContentFrame struct { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + TimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` + // Likelihood of the pornography content.. 
+ PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,enum=google.cloud.videointelligence.v1p1beta1.Likelihood" json:"pornography_likelihood,omitempty"` +} + +func (m *ExplicitContentFrame) Reset() { *m = ExplicitContentFrame{} } +func (m *ExplicitContentFrame) String() string { return proto.CompactTextString(m) } +func (*ExplicitContentFrame) ProtoMessage() {} +func (*ExplicitContentFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ExplicitContentFrame) GetTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.TimeOffset + } + return nil +} + +func (m *ExplicitContentFrame) GetPornographyLikelihood() Likelihood { + if m != nil { + return m.PornographyLikelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +type ExplicitContentAnnotation struct { + // All video frames where explicit content was detected. + Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` +} + +func (m *ExplicitContentAnnotation) Reset() { *m = ExplicitContentAnnotation{} } +func (m *ExplicitContentAnnotation) String() string { return proto.CompactTextString(m) } +func (*ExplicitContentAnnotation) ProtoMessage() {} +func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame { + if m != nil { + return m.Frames + } + return nil +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +type NormalizedBoundingBox struct { + // Left X coordinate. + Left float32 `protobuf:"fixed32,1,opt,name=left" json:"left,omitempty"` + // Top Y coordinate. 
+ Top float32 `protobuf:"fixed32,2,opt,name=top" json:"top,omitempty"` + // Right X coordinate. + Right float32 `protobuf:"fixed32,3,opt,name=right" json:"right,omitempty"` + // Bottom Y coordinate. + Bottom float32 `protobuf:"fixed32,4,opt,name=bottom" json:"bottom,omitempty"` +} + +func (m *NormalizedBoundingBox) Reset() { *m = NormalizedBoundingBox{} } +func (m *NormalizedBoundingBox) String() string { return proto.CompactTextString(m) } +func (*NormalizedBoundingBox) ProtoMessage() {} +func (*NormalizedBoundingBox) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *NormalizedBoundingBox) GetLeft() float32 { + if m != nil { + return m.Left + } + return 0 +} + +func (m *NormalizedBoundingBox) GetTop() float32 { + if m != nil { + return m.Top + } + return 0 +} + +func (m *NormalizedBoundingBox) GetRight() float32 { + if m != nil { + return m.Right + } + return 0 +} + +func (m *NormalizedBoundingBox) GetBottom() float32 { + if m != nil { + return m.Bottom + } + return 0 +} + +// Video segment level annotation results for face detection. +type FaceSegment struct { + // Video segment where a face was detected. + Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` +} + +func (m *FaceSegment) Reset() { *m = FaceSegment{} } +func (m *FaceSegment) String() string { return proto.CompactTextString(m) } +func (*FaceSegment) ProtoMessage() {} +func (*FaceSegment) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *FaceSegment) GetSegment() *VideoSegment { + if m != nil { + return m.Segment + } + return nil +} + +// Video frame level annotation results for face detection. +type FaceDetectionFrame struct { + // Face attributes in a frame. + // There can be more than one attributes if the same face is detected in + // multiple locations within the current frame. 
+ Attributes []*FaceDetectionAttribute `protobuf:"bytes,1,rep,name=attributes" json:"attributes,omitempty"` + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + TimeOffset *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` +} + +func (m *FaceDetectionFrame) Reset() { *m = FaceDetectionFrame{} } +func (m *FaceDetectionFrame) String() string { return proto.CompactTextString(m) } +func (*FaceDetectionFrame) ProtoMessage() {} +func (*FaceDetectionFrame) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *FaceDetectionFrame) GetAttributes() []*FaceDetectionAttribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *FaceDetectionFrame) GetTimeOffset() *google_protobuf3.Duration { + if m != nil { + return m.TimeOffset + } + return nil +} + +// Face detection attribute. +type FaceDetectionAttribute struct { + // Normalized Bounding box. + NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox" json:"normalized_bounding_box,omitempty"` + // Emotion attributes. + Emotions []*EmotionAttribute `protobuf:"bytes,2,rep,name=emotions" json:"emotions,omitempty"` +} + +func (m *FaceDetectionAttribute) Reset() { *m = FaceDetectionAttribute{} } +func (m *FaceDetectionAttribute) String() string { return proto.CompactTextString(m) } +func (*FaceDetectionAttribute) ProtoMessage() {} +func (*FaceDetectionAttribute) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *FaceDetectionAttribute) GetNormalizedBoundingBox() *NormalizedBoundingBox { + if m != nil { + return m.NormalizedBoundingBox + } + return nil +} + +func (m *FaceDetectionAttribute) GetEmotions() []*EmotionAttribute { + if m != nil { + return m.Emotions + } + return nil +} + +// Emotion attribute. +type EmotionAttribute struct { + // Emotion entry. 
+ Emotion Emotion `protobuf:"varint,1,opt,name=emotion,enum=google.cloud.videointelligence.v1p1beta1.Emotion" json:"emotion,omitempty"` + // Confidence score. + Score float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` +} + +func (m *EmotionAttribute) Reset() { *m = EmotionAttribute{} } +func (m *EmotionAttribute) String() string { return proto.CompactTextString(m) } +func (*EmotionAttribute) ProtoMessage() {} +func (*EmotionAttribute) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *EmotionAttribute) GetEmotion() Emotion { + if m != nil { + return m.Emotion + } + return Emotion_EMOTION_UNSPECIFIED +} + +func (m *EmotionAttribute) GetScore() float32 { + if m != nil { + return m.Score + } + return 0 +} + +// Face detection annotation. +type FaceDetectionAnnotation struct { + // All video segments where a face was detected. + Segments []*FaceSegment `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty"` + // All video frames where a face was detected. + Frames []*FaceDetectionFrame `protobuf:"bytes,2,rep,name=frames" json:"frames,omitempty"` +} + +func (m *FaceDetectionAnnotation) Reset() { *m = FaceDetectionAnnotation{} } +func (m *FaceDetectionAnnotation) String() string { return proto.CompactTextString(m) } +func (*FaceDetectionAnnotation) ProtoMessage() {} +func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *FaceDetectionAnnotation) GetSegments() []*FaceSegment { + if m != nil { + return m.Segments + } + return nil +} + +func (m *FaceDetectionAnnotation) GetFrames() []*FaceDetectionFrame { + if m != nil { + return m.Frames + } + return nil +} + +// Annotation results for a single video. +type VideoAnnotationResults struct { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). 
+ InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // Label annotations on video level or user specified segment level. + // There is exactly one element for each unique label. + SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations" json:"segment_label_annotations,omitempty"` + // Label annotations on shot level. + // There is exactly one element for each unique label. + ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations" json:"shot_label_annotations,omitempty"` + // Label annotations on frame level. + // There is exactly one element for each unique label. + FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations" json:"frame_label_annotations,omitempty"` + // Face detection annotations. + FaceDetectionAnnotations []*FaceDetectionAnnotation `protobuf:"bytes,13,rep,name=face_detection_annotations,json=faceDetectionAnnotations" json:"face_detection_annotations,omitempty"` + // Shot annotations. Each shot is represented as a video segment. + ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations" json:"shot_annotations,omitempty"` + // Explicit content annotation. + ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation" json:"explicit_annotation,omitempty"` + // Speech transcription. + SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions" json:"speech_transcriptions,omitempty"` + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. 
+ Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"` +} + +func (m *VideoAnnotationResults) Reset() { *m = VideoAnnotationResults{} } +func (m *VideoAnnotationResults) String() string { return proto.CompactTextString(m) } +func (*VideoAnnotationResults) ProtoMessage() {} +func (*VideoAnnotationResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *VideoAnnotationResults) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.SegmentLabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.ShotLabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation { + if m != nil { + return m.FrameLabelAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation { + if m != nil { + return m.FaceDetectionAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment { + if m != nil { + return m.ShotAnnotations + } + return nil +} + +func (m *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation { + if m != nil { + return m.ExplicitAnnotation + } + return nil +} + +func (m *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription { + if m != nil { + return m.SpeechTranscriptions + } + return nil +} + +func (m *VideoAnnotationResults) GetError() *google_rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. 
+type AnnotateVideoResponse struct { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults" json:"annotation_results,omitempty"` +} + +func (m *AnnotateVideoResponse) Reset() { *m = AnnotateVideoResponse{} } +func (m *AnnotateVideoResponse) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoResponse) ProtoMessage() {} +func (*AnnotateVideoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults { + if m != nil { + return m.AnnotationResults + } + return nil +} + +// Annotation progress for a single video. +type VideoAnnotationProgress struct { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` + // Approximate percentage processed thus far. + // Guaranteed to be 100 when fully processed. + ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` + // Time when the request was received. + StartTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time of the most recent update. 
+ UpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` +} + +func (m *VideoAnnotationProgress) Reset() { *m = VideoAnnotationProgress{} } +func (m *VideoAnnotationProgress) String() string { return proto.CompactTextString(m) } +func (*VideoAnnotationProgress) ProtoMessage() {} +func (*VideoAnnotationProgress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *VideoAnnotationProgress) GetInputUri() string { + if m != nil { + return m.InputUri + } + return "" +} + +func (m *VideoAnnotationProgress) GetProgressPercent() int32 { + if m != nil { + return m.ProgressPercent + } + return 0 +} + +func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf4.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf4.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +type AnnotateVideoProgress struct { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress" json:"annotation_progress,omitempty"` +} + +func (m *AnnotateVideoProgress) Reset() { *m = AnnotateVideoProgress{} } +func (m *AnnotateVideoProgress) String() string { return proto.CompactTextString(m) } +func (*AnnotateVideoProgress) ProtoMessage() {} +func (*AnnotateVideoProgress) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress { + if m != nil { + return m.AnnotationProgress + } + return nil +} + +// Config for SPEECH_TRANSCRIPTION. 
+type SpeechTranscriptionConfig struct { + // *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // *Optional* Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechRecognitionResult`. The server may return fewer than + // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + // return a maximum of one. If omitted, will return a maximum of one. + MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives" json:"max_alternatives,omitempty"` + // *Optional* If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity" json:"filter_profanity,omitempty"` + // *Optional* A means to provide context to assist the speech recognition. + SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts" json:"speech_contexts,omitempty"` + // *Optional* For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. 
+ AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks" json:"audio_tracks,omitempty"` +} + +func (m *SpeechTranscriptionConfig) Reset() { *m = SpeechTranscriptionConfig{} } +func (m *SpeechTranscriptionConfig) String() string { return proto.CompactTextString(m) } +func (*SpeechTranscriptionConfig) ProtoMessage() {} +func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *SpeechTranscriptionConfig) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *SpeechTranscriptionConfig) GetMaxAlternatives() int32 { + if m != nil { + return m.MaxAlternatives + } + return 0 +} + +func (m *SpeechTranscriptionConfig) GetFilterProfanity() bool { + if m != nil { + return m.FilterProfanity + } + return false +} + +func (m *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext { + if m != nil { + return m.SpeechContexts + } + return nil +} + +func (m *SpeechTranscriptionConfig) GetAudioTracks() []int32 { + if m != nil { + return m.AudioTracks + } + return nil +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +type SpeechContext struct { + // *Optional* A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). 
+ Phrases []string `protobuf:"bytes,1,rep,name=phrases" json:"phrases,omitempty"` +} + +func (m *SpeechContext) Reset() { *m = SpeechContext{} } +func (m *SpeechContext) String() string { return proto.CompactTextString(m) } +func (*SpeechContext) ProtoMessage() {} +func (*SpeechContext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *SpeechContext) GetPhrases() []string { + if m != nil { + return m.Phrases + } + return nil +} + +// A speech recognition result corresponding to a portion of the audio. +type SpeechTranscription struct { + // Output only. May contain one or more recognition hypotheses (up to the + // maximum specified in `max_alternatives`). + // These alternatives are ordered in terms of accuracy, with the top (first) + // alternative being the most probable, as ranked by the recognizer. + Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives" json:"alternatives,omitempty"` +} + +func (m *SpeechTranscription) Reset() { *m = SpeechTranscription{} } +func (m *SpeechTranscription) String() string { return proto.CompactTextString(m) } +func (*SpeechTranscription) ProtoMessage() {} +func (*SpeechTranscription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative { + if m != nil { + return m.Alternatives + } + return nil +} + +// Alternative hypotheses (a.k.a. n-best list). +type SpeechRecognitionAlternative struct { + // Output only. Transcript text representing the words that the user spoke. + Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"` + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is typically provided only for the top hypothesis, and + // only for `is_final=true` results. 
Clients should not rely on the + // `confidence` field as it is not guaranteed to be accurate or consistent. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` + // Output only. A list of word-specific information for each recognized word. + Words []*WordInfo `protobuf:"bytes,3,rep,name=words" json:"words,omitempty"` +} + +func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} } +func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionAlternative) ProtoMessage() {} +func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *SpeechRecognitionAlternative) GetTranscript() string { + if m != nil { + return m.Transcript + } + return "" +} + +func (m *SpeechRecognitionAlternative) GetConfidence() float32 { + if m != nil { + return m.Confidence + } + return 0 +} + +func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo { + if m != nil { + return m.Words + } + return nil +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +type WordInfo struct { + // Output only. Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + StartTime *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Output only. Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. 
This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + EndTime *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Output only. The word corresponding to this set of information. + Word string `protobuf:"bytes,3,opt,name=word" json:"word,omitempty"` +} + +func (m *WordInfo) Reset() { *m = WordInfo{} } +func (m *WordInfo) String() string { return proto.CompactTextString(m) } +func (*WordInfo) ProtoMessage() {} +func (*WordInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *WordInfo) GetStartTime() *google_protobuf3.Duration { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *WordInfo) GetEndTime() *google_protobuf3.Duration { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *WordInfo) GetWord() string { + if m != nil { + return m.Word + } + return "" +} + +func init() { + proto.RegisterType((*AnnotateVideoRequest)(nil), "google.cloud.videointelligence.v1p1beta1.AnnotateVideoRequest") + proto.RegisterType((*VideoContext)(nil), "google.cloud.videointelligence.v1p1beta1.VideoContext") + proto.RegisterType((*LabelDetectionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.LabelDetectionConfig") + proto.RegisterType((*ShotChangeDetectionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.ShotChangeDetectionConfig") + proto.RegisterType((*ExplicitContentDetectionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.ExplicitContentDetectionConfig") + proto.RegisterType((*FaceConfig)(nil), "google.cloud.videointelligence.v1p1beta1.FaceConfig") + proto.RegisterType((*VideoSegment)(nil), "google.cloud.videointelligence.v1p1beta1.VideoSegment") + proto.RegisterType((*LabelSegment)(nil), "google.cloud.videointelligence.v1p1beta1.LabelSegment") + proto.RegisterType((*LabelFrame)(nil), 
"google.cloud.videointelligence.v1p1beta1.LabelFrame") + proto.RegisterType((*Entity)(nil), "google.cloud.videointelligence.v1p1beta1.Entity") + proto.RegisterType((*LabelAnnotation)(nil), "google.cloud.videointelligence.v1p1beta1.LabelAnnotation") + proto.RegisterType((*ExplicitContentFrame)(nil), "google.cloud.videointelligence.v1p1beta1.ExplicitContentFrame") + proto.RegisterType((*ExplicitContentAnnotation)(nil), "google.cloud.videointelligence.v1p1beta1.ExplicitContentAnnotation") + proto.RegisterType((*NormalizedBoundingBox)(nil), "google.cloud.videointelligence.v1p1beta1.NormalizedBoundingBox") + proto.RegisterType((*FaceSegment)(nil), "google.cloud.videointelligence.v1p1beta1.FaceSegment") + proto.RegisterType((*FaceDetectionFrame)(nil), "google.cloud.videointelligence.v1p1beta1.FaceDetectionFrame") + proto.RegisterType((*FaceDetectionAttribute)(nil), "google.cloud.videointelligence.v1p1beta1.FaceDetectionAttribute") + proto.RegisterType((*EmotionAttribute)(nil), "google.cloud.videointelligence.v1p1beta1.EmotionAttribute") + proto.RegisterType((*FaceDetectionAnnotation)(nil), "google.cloud.videointelligence.v1p1beta1.FaceDetectionAnnotation") + proto.RegisterType((*VideoAnnotationResults)(nil), "google.cloud.videointelligence.v1p1beta1.VideoAnnotationResults") + proto.RegisterType((*AnnotateVideoResponse)(nil), "google.cloud.videointelligence.v1p1beta1.AnnotateVideoResponse") + proto.RegisterType((*VideoAnnotationProgress)(nil), "google.cloud.videointelligence.v1p1beta1.VideoAnnotationProgress") + proto.RegisterType((*AnnotateVideoProgress)(nil), "google.cloud.videointelligence.v1p1beta1.AnnotateVideoProgress") + proto.RegisterType((*SpeechTranscriptionConfig)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechTranscriptionConfig") + proto.RegisterType((*SpeechContext)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechContext") + proto.RegisterType((*SpeechTranscription)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechTranscription") + 
proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.videointelligence.v1p1beta1.SpeechRecognitionAlternative") + proto.RegisterType((*WordInfo)(nil), "google.cloud.videointelligence.v1p1beta1.WordInfo") + proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.Feature", Feature_name, Feature_value) + proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.LabelDetectionMode", LabelDetectionMode_name, LabelDetectionMode_value) + proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.Likelihood", Likelihood_name, Likelihood_value) + proto.RegisterEnum("google.cloud.videointelligence.v1p1beta1.Emotion", Emotion_name, Emotion_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for VideoIntelligenceService service + +type VideoIntelligenceServiceClient interface { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). 
+ AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type videoIntelligenceServiceClient struct { + cc *grpc.ClientConn +} + +func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient { + return &videoIntelligenceServiceClient{cc} +} + +func (c *videoIntelligenceServiceClient) AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for VideoIntelligenceService service + +type VideoIntelligenceServiceServer interface { + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). 
+ AnnotateVideo(context.Context, *AnnotateVideoRequest) (*google_longrunning.Operation, error) +} + +func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer) { + s.RegisterService(&_VideoIntelligenceService_serviceDesc, srv) +} + +func _VideoIntelligenceService_AnnotateVideo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AnnotateVideoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService/AnnotateVideo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VideoIntelligenceServiceServer).AnnotateVideo(ctx, req.(*AnnotateVideoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _VideoIntelligenceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService", + HandlerType: (*VideoIntelligenceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AnnotateVideo", + Handler: _VideoIntelligenceService_AnnotateVideo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/videointelligence/v1p1beta1/video_intelligence.proto", +} + +func init() { + proto.RegisterFile("google/cloud/videointelligence/v1p1beta1/video_intelligence.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 2237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0x4b, 0x6f, 0x23, 0x59, + 0xf5, 0xff, 0x97, 0x1d, 0x3b, 0xf1, 0x71, 0x12, 0xbb, 0x6f, 0x5e, 0x4e, 0xa6, 0x1f, 0x99, 0x9a, + 0x3f, 0x52, 0xba, 0x41, 0x0e, 0x09, 0xcd, 0xc0, 0xf4, 0x0c, 0x03, 0x15, 0xbb, 0xd2, 0xb1, 0x26, + 0xb1, 0xcd, 0x2d, 
0x27, 0xd0, 0xd0, 0xa8, 0xa6, 0x52, 0x75, 0xed, 0x14, 0x5d, 0xae, 0xeb, 0xa9, + 0x2a, 0xf7, 0x74, 0xb3, 0x61, 0x60, 0x58, 0x8c, 0x84, 0xc4, 0x66, 0x84, 0x84, 0xc4, 0x86, 0x25, + 0x62, 0x39, 0x5f, 0x00, 0x09, 0xb1, 0x81, 0x2d, 0x2b, 0x24, 0x36, 0x48, 0xb0, 0xe0, 0x5b, 0xa0, + 0xfb, 0x28, 0xbb, 0xfc, 0x48, 0x27, 0x4e, 0x8b, 0x9d, 0xef, 0x39, 0xf7, 0xfe, 0xce, 0xe3, 0x9e, + 0x47, 0x9d, 0x6b, 0xd0, 0x3a, 0x94, 0x76, 0x3c, 0xb2, 0x6b, 0x7b, 0xb4, 0xef, 0xec, 0x3e, 0x77, + 0x1d, 0x42, 0x5d, 0x3f, 0x22, 0x9e, 0xe7, 0x76, 0x88, 0x6f, 0x93, 0xdd, 0xe7, 0x7b, 0xbd, 0xbd, + 0x73, 0x12, 0x59, 0x7b, 0x82, 0x67, 0x26, 0x99, 0xe5, 0x5e, 0x40, 0x23, 0x8a, 0x76, 0x04, 0x44, + 0x99, 0x43, 0x94, 0x27, 0x20, 0xca, 0x03, 0x88, 0xad, 0xdb, 0x52, 0x98, 0xd5, 0x73, 0x77, 0x2d, + 0xdf, 0xa7, 0x91, 0x15, 0xb9, 0xd4, 0x0f, 0x05, 0xce, 0xd6, 0x5b, 0x92, 0xeb, 0x51, 0xbf, 0x13, + 0xf4, 0x7d, 0xdf, 0xf5, 0x3b, 0xbb, 0xb4, 0x47, 0x82, 0x91, 0x4d, 0x77, 0xe5, 0x26, 0xbe, 0x3a, + 0xef, 0xb7, 0x77, 0x9d, 0xbe, 0xd8, 0x20, 0xf9, 0xf7, 0xc6, 0xf9, 0x91, 0xdb, 0x25, 0x61, 0x64, + 0x75, 0x7b, 0x72, 0xc3, 0x86, 0xdc, 0x10, 0xf4, 0xec, 0xdd, 0x30, 0xb2, 0xa2, 0xbe, 0x44, 0x56, + 0xff, 0x9c, 0x82, 0x55, 0x4d, 0x28, 0x45, 0xce, 0x98, 0x11, 0x98, 0x7c, 0xd4, 0x27, 0x61, 0x84, + 0xde, 0x80, 0x9c, 0xeb, 0xf7, 0xfa, 0x91, 0xd9, 0x0f, 0xdc, 0x92, 0xb2, 0xad, 0xec, 0xe4, 0xf0, + 0x02, 0x27, 0x9c, 0x06, 0x2e, 0x7a, 0x0b, 0x96, 0x04, 0xd3, 0xa6, 0x7e, 0x44, 0xfc, 0xa8, 0x94, + 0xdd, 0x56, 0x76, 0x16, 0xf1, 0x22, 0x27, 0x56, 0x04, 0x0d, 0x9d, 0xc0, 0x42, 0x9b, 0x58, 0x51, + 0x3f, 0x20, 0x61, 0x29, 0xb5, 0x9d, 0xde, 0x59, 0xde, 0xdf, 0x2b, 0x5f, 0xd7, 0x69, 0xe5, 0x43, + 0x71, 0x12, 0x0f, 0x20, 0xd0, 0x0f, 0x61, 0x49, 0x5c, 0x06, 0x97, 0xf9, 0x22, 0x2a, 0xa5, 0xb7, + 0x95, 0x9d, 0xfc, 0xfe, 0xdb, 0xd7, 0xc7, 0xe4, 0xf6, 0x55, 0xc4, 0x69, 0xbc, 0xf8, 0x3c, 0xb1, + 0x42, 0x77, 0x00, 0x68, 0x3f, 0x8a, 0xcd, 0x9d, 0xe3, 0xe6, 0xe6, 0x04, 0x85, 0xd9, 0x7b, 0x0f, + 0xf2, 0x1e, 0xb5, 0xb9, 0xc7, 0x4d, 0xd7, 0x29, 0x65, 
0x38, 0x1f, 0x62, 0x52, 0xcd, 0x51, 0xff, + 0x9d, 0x81, 0xc5, 0x24, 0x3c, 0xc2, 0xb0, 0x10, 0x92, 0x4e, 0x97, 0xf8, 0x51, 0x58, 0x52, 0xb6, + 0xd3, 0x37, 0x50, 0xd4, 0x10, 0xc7, 0xf1, 0x00, 0x07, 0x45, 0xb0, 0xee, 0x59, 0xe7, 0xc4, 0x33, + 0x1d, 0x12, 0x11, 0x9b, 0x2b, 0x63, 0x53, 0xbf, 0xed, 0x76, 0x4a, 0x29, 0xee, 0x8a, 0xf7, 0xaf, + 0x2f, 0xe1, 0x98, 0xe1, 0x54, 0x63, 0x98, 0x0a, 0x47, 0xc1, 0xab, 0xde, 0x14, 0x2a, 0xfa, 0x85, + 0x02, 0xb7, 0xc3, 0x0b, 0x1a, 0x99, 0xf6, 0x85, 0xe5, 0x77, 0xc8, 0xa4, 0x70, 0x71, 0x0f, 0x95, + 0xeb, 0x0b, 0x37, 0x2e, 0x68, 0x54, 0xe1, 0x60, 0xe3, 0x1a, 0x6c, 0x86, 0x97, 0xb1, 0xd0, 0xe7, + 0x0a, 0xbc, 0x49, 0x5e, 0xf4, 0x3c, 0xd7, 0x76, 0x07, 0x61, 0x37, 0xa9, 0xcb, 0x1c, 0xd7, 0xe5, + 0xe8, 0xfa, 0xba, 0xe8, 0x12, 0x52, 0x06, 0xed, 0xb8, 0x42, 0x77, 0xc9, 0x2b, 0xf9, 0xe8, 0x53, + 0x05, 0xde, 0x08, 0x7b, 0x84, 0xd8, 0x17, 0x66, 0x14, 0x58, 0x7e, 0x68, 0x07, 0x6e, 0x2f, 0xa9, + 0x4f, 0x76, 0x66, 0xdf, 0x70, 0xb0, 0x56, 0x12, 0x6b, 0xe0, 0x9b, 0xcb, 0x58, 0xe8, 0x02, 0xd6, + 0xda, 0x96, 0x3d, 0xe5, 0x6a, 0xe6, 0xb9, 0xf8, 0x87, 0x33, 0xa4, 0x9d, 0x65, 0x13, 0x29, 0x6f, + 0x85, 0x41, 0x8e, 0xd9, 0xab, 0xfe, 0x45, 0x81, 0xd5, 0x69, 0xb1, 0x83, 0x7c, 0x58, 0x1d, 0x8f, + 0xcd, 0x2e, 0x75, 0x08, 0xaf, 0x1c, 0xcb, 0xfb, 0xef, 0xdd, 0x34, 0x32, 0x4f, 0xa8, 0x43, 0x30, + 0xf2, 0x26, 0x68, 0xe8, 0xcb, 0x70, 0x2b, 0x14, 0x85, 0xd4, 0x0a, 0x5e, 0x9a, 0xb6, 0xd5, 0x25, + 0x81, 0xc5, 0xd3, 0x60, 0x01, 0x17, 0x87, 0x8c, 0x0a, 0xa7, 0xa3, 0x55, 0xc8, 0x30, 0x65, 0x3c, + 0x1e, 0xaa, 0x39, 0x2c, 0x16, 0xea, 0x1e, 0x6c, 0x5e, 0x1a, 0x89, 0xc3, 0x23, 0x4a, 0xf2, 0xc8, + 0xdb, 0x70, 0xf7, 0xd5, 0x01, 0x73, 0xc9, 0xb9, 0x4f, 0x15, 0x80, 0xa1, 0x6b, 0xa7, 0x6f, 0x42, + 0x0f, 0x61, 0xdd, 0xf5, 0x6d, 0xaf, 0xef, 0x10, 0xf3, 0x9c, 0xf6, 0x7d, 0xc7, 0xf5, 0x3b, 0xe6, + 0x39, 0x7d, 0xc1, 0xab, 0x27, 0xb3, 0x6b, 0x55, 0x72, 0x0f, 0x24, 0xf3, 0x80, 0xf1, 0xd0, 0x7d, + 0x28, 0xc6, 0xa7, 0x48, 0x97, 0xf2, 0xa6, 0xc1, 0xb3, 0x60, 0x01, 0x17, 0x24, 0x5d, 0x97, 
0x64, + 0xf5, 0x37, 0x8a, 0x2c, 0x52, 0xb2, 0xb4, 0x20, 0x9d, 0x3b, 0x31, 0x88, 0x4c, 0xd6, 0x2e, 0x4c, + 0xda, 0x6e, 0x87, 0x24, 0xe2, 0x3a, 0xe5, 0xf7, 0x37, 0xe3, 0x1b, 0x8b, 0x5b, 0x4a, 0xb9, 0x2a, + 0x5b, 0x0e, 0x2e, 0xf0, 0x33, 0x2d, 0xb7, 0x4b, 0x1a, 0xfc, 0x04, 0xd2, 0xa0, 0x40, 0x7c, 0x67, + 0x04, 0x24, 0x75, 0x15, 0xc8, 0x12, 0xf1, 0x9d, 0x21, 0x84, 0xfa, 0x89, 0x02, 0x8b, 0xfc, 0xe6, + 0x63, 0xd5, 0x9a, 0x30, 0x2f, 0xeb, 0x9e, 0x54, 0xe8, 0xa6, 0xe5, 0x33, 0x86, 0x41, 0x77, 0x01, + 0x78, 0x56, 0x38, 0x6c, 0x37, 0x57, 0x30, 0x85, 0x13, 0x14, 0xf5, 0x02, 0x80, 0x6b, 0x70, 0x18, + 0x58, 0x5d, 0x82, 0x1e, 0x41, 0x7e, 0x26, 0xa7, 0x40, 0x34, 0xf4, 0xc7, 0x55, 0x92, 0x3c, 0xc8, + 0xea, 0x7e, 0xe4, 0x46, 0x2f, 0x59, 0x93, 0x25, 0xfc, 0x17, 0xeb, 0x2a, 0xb2, 0xc9, 0x0a, 0x42, + 0xcd, 0x41, 0xdb, 0x90, 0x77, 0xc8, 0x20, 0xd5, 0x39, 0x4e, 0x0e, 0x27, 0x49, 0xac, 0x0d, 0x7b, + 0x96, 0xdf, 0xe9, 0x5b, 0x1d, 0x62, 0xda, 0x2c, 0xdb, 0x44, 0x7c, 0x2f, 0xc6, 0xc4, 0x0a, 0x75, + 0x88, 0xfa, 0x8f, 0x14, 0x14, 0xb8, 0x61, 0xda, 0xe0, 0xdb, 0x03, 0x1d, 0x41, 0x56, 0x88, 0x91, + 0x86, 0x7d, 0x75, 0x86, 0x82, 0xc9, 0xcf, 0x61, 0x79, 0x1e, 0xfd, 0x08, 0x6e, 0xd9, 0x56, 0x44, + 0x3a, 0x34, 0x78, 0x69, 0x72, 0x92, 0x2b, 0xbb, 0xfd, 0x4d, 0x40, 0x8b, 0x31, 0x94, 0x2e, 0x91, + 0x46, 0xda, 0x68, 0x7a, 0xd6, 0x36, 0x9a, 0x0c, 0xa8, 0x44, 0x1b, 0x3d, 0x86, 0x6c, 0x9b, 0xdd, + 0x31, 0xcb, 0x93, 0xf4, 0x6c, 0xe5, 0x71, 0x18, 0x20, 0x58, 0x62, 0xa8, 0x7f, 0x54, 0x60, 0x75, + 0xac, 0x26, 0xbc, 0x7e, 0x04, 0x3d, 0x83, 0xf5, 0x1e, 0x0d, 0x7c, 0xda, 0x09, 0xac, 0xde, 0xc5, + 0x4b, 0xd3, 0x73, 0x9f, 0x11, 0xcf, 0xbd, 0xa0, 0xd4, 0xe1, 0x51, 0xb0, 0x3c, 0x93, 0xca, 0x83, + 0xb3, 0x78, 0x2d, 0x81, 0x39, 0x24, 0xab, 0x21, 0x6c, 0x8e, 0x19, 0x90, 0x88, 0x94, 0xb3, 0x81, + 0xb3, 0xc4, 0x57, 0xcc, 0xfb, 0x37, 0x6e, 0xad, 0xa3, 0x6e, 0x7b, 0x06, 0x6b, 0x75, 0x1a, 0x74, + 0x2d, 0xcf, 0xfd, 0x09, 0x71, 0x12, 0x15, 0x0d, 0x21, 0x98, 0xf3, 0x48, 0x5b, 0xf8, 0x2b, 0x85, + 0xf9, 0x6f, 0x54, 0x84, 0x74, 
0x44, 0x7b, 0x32, 0x93, 0xd8, 0x4f, 0x56, 0x41, 0x03, 0xb7, 0x73, + 0x21, 0x3e, 0x02, 0x53, 0x58, 0x2c, 0xd0, 0x3a, 0x64, 0xcf, 0x69, 0x14, 0xd1, 0x2e, 0xaf, 0x80, + 0x29, 0x2c, 0x57, 0xaa, 0x09, 0x79, 0x56, 0x7d, 0xff, 0x67, 0xb5, 0x45, 0xfd, 0x42, 0x01, 0x74, + 0x98, 0x6c, 0x97, 0x22, 0x04, 0x3e, 0x04, 0xb0, 0xa2, 0x28, 0x70, 0xcf, 0xfb, 0xd1, 0xc0, 0x81, + 0xdf, 0x99, 0xad, 0x19, 0x0f, 0x10, 0xb5, 0x18, 0x08, 0x27, 0x30, 0xc7, 0x83, 0x2c, 0x35, 0x43, + 0x90, 0xa9, 0xff, 0x51, 0x60, 0x7d, 0xba, 0x08, 0xf4, 0x31, 0x6c, 0xf8, 0x83, 0xdb, 0x19, 0xe9, + 0x46, 0xd2, 0x63, 0xdf, 0xbe, 0xbe, 0x15, 0x53, 0xaf, 0x19, 0xaf, 0xf9, 0x53, 0x6f, 0xff, 0x0c, + 0x16, 0x06, 0x5d, 0x4c, 0x54, 0x91, 0x47, 0x33, 0x04, 0x9c, 0x38, 0x39, 0xf4, 0xd4, 0x00, 0x4b, + 0xed, 0x43, 0x71, 0x9c, 0x8b, 0x3e, 0x80, 0x79, 0xc9, 0x97, 0x5f, 0x29, 0x7b, 0x33, 0x8b, 0xc2, + 0x31, 0x02, 0x0b, 0xc8, 0xd0, 0xa6, 0x41, 0x5c, 0xee, 0xc5, 0x42, 0xfd, 0x93, 0x02, 0x1b, 0xa3, + 0x2e, 0x1e, 0x66, 0xd6, 0x77, 0x27, 0x26, 0x84, 0xaf, 0xcf, 0x16, 0x1a, 0x93, 0x95, 0xad, 0x35, + 0x48, 0x56, 0xe1, 0xbb, 0xf7, 0x6e, 0x18, 0x6b, 0xa3, 0xa9, 0xfa, 0xd9, 0x3c, 0xac, 0xf3, 0xb0, + 0x1f, 0x2a, 0x8f, 0x49, 0xd8, 0xf7, 0xa2, 0xf0, 0xd5, 0x43, 0x62, 0x1f, 0x36, 0xa5, 0x66, 0xa6, + 0xf8, 0x34, 0x4c, 0x0c, 0xbf, 0x52, 0xc1, 0x77, 0x66, 0x2c, 0xbd, 0x09, 0x0d, 0x36, 0x24, 0xf6, + 0x18, 0x3d, 0x44, 0x14, 0xd6, 0xf9, 0xb8, 0x32, 0x29, 0x33, 0xfd, 0xba, 0x32, 0x57, 0x19, 0xf0, + 0x84, 0xc0, 0x8f, 0x60, 0x83, 0x7b, 0x6a, 0x8a, 0xc4, 0xb9, 0xd7, 0x95, 0xb8, 0xc6, 0x91, 0x27, + 0x44, 0xfe, 0x14, 0xb6, 0xc6, 0x3e, 0xf8, 0x93, 0x52, 0x97, 0xb8, 0x54, 0xed, 0xa6, 0x85, 0x66, + 0x28, 0xbd, 0xd4, 0x9e, 0xce, 0x08, 0x91, 0x05, 0x45, 0xee, 0xe4, 0xa4, 0xd8, 0xec, 0x6b, 0x8d, + 0xb9, 0x05, 0x86, 0x97, 0x14, 0x11, 0xc1, 0xca, 0x60, 0xde, 0x1b, 0x8a, 0x91, 0x23, 0x4d, 0xe5, + 0xc6, 0x6d, 0x28, 0x61, 0x1e, 0x8a, 0xf1, 0x13, 0x59, 0x19, 0xc0, 0xda, 0xb4, 0x79, 0x2e, 0x2c, + 0xe5, 0xb9, 0x75, 0xdf, 0x7a, 0xad, 0x49, 0x0e, 0xaf, 0x4e, 0x99, 
0xe1, 0x42, 0xb4, 0x03, 0x19, + 0x12, 0x04, 0x34, 0x28, 0xe5, 0xb8, 0x6d, 0x28, 0x96, 0x11, 0xf4, 0xec, 0xb2, 0xc1, 0x1f, 0x6b, + 0xb0, 0xd8, 0xa0, 0x7e, 0xa6, 0xc0, 0xda, 0xd8, 0x6b, 0x4d, 0xd8, 0xa3, 0x7e, 0x48, 0x10, 0x05, + 0x34, 0x74, 0x92, 0x19, 0x88, 0xfc, 0x9c, 0xbd, 0xe5, 0x4c, 0xcf, 0x73, 0x7c, 0xcb, 0x1a, 0x27, + 0xa9, 0x7f, 0x57, 0x60, 0x63, 0x6c, 0x77, 0x33, 0xa0, 0x9d, 0x80, 0x84, 0x57, 0x94, 0x85, 0xfb, + 0x50, 0xec, 0xc9, 0x8d, 0x66, 0x8f, 0x04, 0x36, 0x6b, 0xc3, 0xac, 0x68, 0x66, 0x70, 0x21, 0xa6, + 0x37, 0x05, 0x19, 0xbd, 0x03, 0x30, 0x9c, 0x4f, 0xe4, 0x3b, 0xc3, 0xd6, 0x44, 0x73, 0x6b, 0xc5, + 0x6f, 0x5d, 0x38, 0x37, 0x98, 0x4c, 0xd0, 0xbb, 0x90, 0xef, 0xf7, 0x1c, 0x2b, 0x22, 0xe2, 0xec, + 0xdc, 0x95, 0x67, 0x41, 0x6c, 0x67, 0x04, 0xf5, 0x97, 0xe3, 0x6e, 0x1e, 0x58, 0x16, 0xc0, 0x4a, + 0xc2, 0xcd, 0xb1, 0xbe, 0xd2, 0xcf, 0xda, 0x8d, 0xfd, 0x1c, 0xe3, 0xe3, 0xc4, 0x25, 0xc6, 0x34, + 0xf5, 0xb7, 0x29, 0xd8, 0xbc, 0xf4, 0x59, 0x60, 0x72, 0x06, 0x50, 0x26, 0x67, 0x00, 0xe6, 0xf3, + 0xae, 0xf5, 0xc2, 0xb4, 0xbc, 0x88, 0x04, 0xbe, 0x15, 0xb9, 0xcf, 0xe5, 0x50, 0x99, 0xc1, 0x85, + 0xae, 0xf5, 0x42, 0x4b, 0x90, 0xd9, 0xd6, 0xb6, 0xcb, 0x08, 0xcc, 0xba, 0xb6, 0xe5, 0xb3, 0x21, + 0x21, 0x2d, 0xe6, 0x49, 0x41, 0x6f, 0xc6, 0x64, 0xf4, 0x21, 0x14, 0x64, 0xae, 0xc8, 0x27, 0xb9, + 0xb8, 0xe0, 0x7d, 0x63, 0xd6, 0x2c, 0x89, 0x1f, 0xe5, 0x96, 0xc3, 0xe4, 0x32, 0x44, 0x6f, 0xc2, + 0xa2, 0xd5, 0x77, 0x5c, 0xca, 0x92, 0xd1, 0x7e, 0x26, 0x4a, 0x4c, 0x06, 0xe7, 0x39, 0xad, 0xc5, + 0x49, 0xea, 0x7d, 0x58, 0x1a, 0xc1, 0x40, 0x25, 0x98, 0xef, 0x5d, 0x04, 0x56, 0x28, 0xbf, 0xb8, + 0x72, 0x38, 0x5e, 0xaa, 0x3f, 0x53, 0x60, 0x65, 0x8a, 0x23, 0xd1, 0x8f, 0x61, 0x71, 0xc4, 0x33, + 0xe2, 0x36, 0x0f, 0x67, 0x35, 0x02, 0x13, 0x9b, 0x76, 0x7c, 0x97, 0x97, 0xca, 0x21, 0x1c, 0x1e, + 0xc1, 0x56, 0x7f, 0xaf, 0xc0, 0xed, 0x57, 0x6d, 0x67, 0xc3, 0xe3, 0xb0, 0xf2, 0xc8, 0xcb, 0x4c, + 0x50, 0xae, 0x1a, 0x2e, 0xd1, 0x11, 0x64, 0x3e, 0xa6, 0x81, 0x13, 0x77, 0xbb, 0xfd, 0xeb, 0x5b, + 0xf1, 
0x3d, 0x1a, 0x38, 0x35, 0xbf, 0x4d, 0xb1, 0x00, 0x50, 0x7f, 0xa5, 0xc0, 0x42, 0x4c, 0x43, + 0xdf, 0x1c, 0x49, 0xc5, 0x2b, 0x87, 0x99, 0x44, 0x26, 0x3e, 0x84, 0x85, 0xf8, 0x75, 0xe0, 0xea, + 0xef, 0xd3, 0x79, 0xf9, 0x2c, 0xc0, 0xc6, 0x00, 0xa6, 0x85, 0x9c, 0x68, 0xf9, 0xef, 0x07, 0xbf, + 0x53, 0x60, 0x5e, 0xbe, 0x0b, 0xa3, 0x0d, 0x58, 0x39, 0xd4, 0xb5, 0xd6, 0x29, 0xd6, 0xcd, 0xd3, + 0xba, 0xd1, 0xd4, 0x2b, 0xb5, 0xc3, 0x9a, 0x5e, 0x2d, 0xfe, 0x1f, 0x5a, 0x81, 0xc2, 0xb1, 0x76, + 0xa0, 0x1f, 0x9b, 0x55, 0xbd, 0xa5, 0x57, 0x5a, 0xb5, 0x46, 0xbd, 0xa8, 0xa0, 0x4d, 0x58, 0x33, + 0x8e, 0x1a, 0x2d, 0xb3, 0x72, 0xa4, 0xd5, 0x1f, 0xeb, 0x09, 0x56, 0x0a, 0xdd, 0x85, 0x2d, 0xfd, + 0xfb, 0xcd, 0xe3, 0x5a, 0xa5, 0xd6, 0x32, 0x2b, 0x8d, 0x7a, 0x4b, 0xaf, 0xb7, 0x12, 0xfc, 0x34, + 0x42, 0xb0, 0x7c, 0xa8, 0x55, 0x92, 0x67, 0x16, 0x50, 0x09, 0x56, 0x8d, 0xa6, 0xae, 0x57, 0x8e, + 0xcc, 0x16, 0xd6, 0xea, 0x46, 0x05, 0xd7, 0x9a, 0x9c, 0x93, 0x7d, 0x10, 0x00, 0x9a, 0x7c, 0xc0, + 0x42, 0xff, 0x0f, 0xdb, 0x63, 0x3a, 0x99, 0x27, 0x8d, 0xea, 0xb8, 0xe6, 0x4b, 0x90, 0xe3, 0x4a, + 0x32, 0x56, 0x51, 0x41, 0xcb, 0x00, 0x87, 0x58, 0x3b, 0xd1, 0xc5, 0x3a, 0xc5, 0x2c, 0xe6, 0x6c, + 0xad, 0x5e, 0x35, 0x13, 0x8c, 0xf4, 0x83, 0x08, 0x60, 0x38, 0xcd, 0xa1, 0x2d, 0x58, 0x3f, 0xae, + 0x7d, 0xa0, 0x1f, 0xd7, 0x8e, 0x1a, 0x8d, 0xea, 0x98, 0x84, 0x5b, 0xb0, 0x74, 0xa6, 0xe3, 0x27, + 0xe6, 0x69, 0x9d, 0x6f, 0x79, 0x52, 0x54, 0xd0, 0x22, 0x2c, 0x0c, 0x56, 0x29, 0xb6, 0x6a, 0x36, + 0x0c, 0xa3, 0x76, 0x70, 0xac, 0x17, 0xd3, 0x08, 0x20, 0x2b, 0x39, 0x73, 0xa8, 0x00, 0x79, 0x7e, + 0x54, 0x12, 0x32, 0x0f, 0xfe, 0xa9, 0xc0, 0xbc, 0xfc, 0x0a, 0x66, 0xaa, 0xe9, 0x27, 0x0d, 0x6e, + 0xd7, 0x84, 0x49, 0xda, 0xc9, 0xa9, 0xa1, 0x9f, 0xe8, 0xf5, 0x56, 0x51, 0x41, 0x39, 0xc8, 0x30, + 0xff, 0xe3, 0x62, 0x8a, 0xa9, 0x52, 0x69, 0xd4, 0x2b, 0x7a, 0xbd, 0x85, 0x35, 0xe9, 0xe9, 0x02, + 0xe4, 0xe5, 0x05, 0xf0, 0xed, 0x73, 0x4c, 0x7e, 0x55, 0x37, 0x6a, 0x58, 0x2f, 0x66, 0xd8, 0x35, + 0x54, 0x6b, 0x86, 0xd6, 0x6c, 0x36, 0x6a, 
0x92, 0x9f, 0x45, 0x79, 0x98, 0xaf, 0xd6, 0x8c, 0xc7, + 0xa7, 0x46, 0xab, 0x38, 0xcf, 0x16, 0xfa, 0xb1, 0x26, 0x2f, 0xe8, 0x16, 0x2c, 0xe9, 0x27, 0x07, + 0x1a, 0xc6, 0x9a, 0x61, 0xf0, 0xcd, 0x39, 0x66, 0x5a, 0xad, 0xde, 0xd2, 0xb1, 0x6e, 0xb4, 0x8a, + 0xc0, 0x34, 0x69, 0xe2, 0x5a, 0x55, 0x2f, 0xe6, 0xd9, 0x41, 0x43, 0xab, 0xd6, 0x75, 0xc3, 0x28, + 0x2e, 0xb2, 0x5d, 0xc6, 0x29, 0x6e, 0xe2, 0x9a, 0xa1, 0x17, 0x97, 0xf6, 0xbf, 0x50, 0xa0, 0xc4, + 0x2b, 0x75, 0x2d, 0x91, 0x31, 0x06, 0x09, 0x9e, 0xbb, 0x36, 0x41, 0xbf, 0x56, 0x60, 0x69, 0xa4, + 0x49, 0xa0, 0x19, 0x66, 0xe3, 0x69, 0x7f, 0xb9, 0x6c, 0xdd, 0x89, 0xcf, 0x27, 0xfe, 0x0b, 0x2a, + 0x37, 0xe2, 0xff, 0x82, 0xd4, 0x2f, 0xfd, 0xfc, 0x6f, 0xff, 0xfa, 0x3c, 0x75, 0x4f, 0xdd, 0x1a, + 0xff, 0x7b, 0x2a, 0x7c, 0x24, 0xbb, 0x06, 0x79, 0xa4, 0x3c, 0x38, 0xf8, 0x24, 0x05, 0x5f, 0xb1, + 0x69, 0xf7, 0xda, 0xba, 0x1c, 0xdc, 0xb9, 0xcc, 0xc4, 0x26, 0x4b, 0xcf, 0xa6, 0xf2, 0x83, 0x27, + 0x12, 0xaa, 0x43, 0x59, 0x53, 0x29, 0xd3, 0xa0, 0xb3, 0xdb, 0x21, 0x3e, 0x4f, 0xde, 0x5d, 0xc1, + 0xb2, 0x7a, 0x6e, 0x78, 0xf5, 0x9f, 0x69, 0xef, 0x4e, 0xf0, 0xfe, 0x90, 0xda, 0x79, 0x2c, 0xb0, + 0x2b, 0x5c, 0xcd, 0x09, 0x4d, 0xca, 0x67, 0x7b, 0xcd, 0xbd, 0x03, 0x76, 0xf8, 0xaf, 0xf1, 0xd6, + 0xa7, 0x7c, 0xeb, 0xd3, 0x89, 0xad, 0x4f, 0xcf, 0x62, 0x39, 0xe7, 0x59, 0xae, 0xdb, 0xd7, 0xfe, + 0x1b, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x29, 0xc9, 0x61, 0xe7, 0x1b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go index cc15feed1..9fa5d82d3 100644 --- a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v1/trace.pb.go @@ -218,9 +218,11 @@ type TraceSpan struct { // * `/http/client_region` // * `/http/host` // * `/http/method` + // * `/http/path` // * `/http/redirected_url` // * `/http/request/size` // * 
`/http/response/size` + // * `/http/route` // * `/http/status_code` // * `/http/url` // * `/http/user_agent` diff --git a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/dlp.pb.go b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/dlp.pb.go new file mode 100644 index 000000000..96d5750d0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/dlp.pb.go @@ -0,0 +1,9624 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/privacy/dlp/v2/dlp.proto + +/* +Package dlp is a generated protocol buffer package. + +It is generated from these files: + google/privacy/dlp/v2/dlp.proto + google/privacy/dlp/v2/storage.proto + +It has these top-level messages: + InspectConfig + ByteContentItem + ContentItem + Table + InspectResult + Finding + Location + ContentLocation + DocumentLocation + RecordLocation + TableLocation + Range + ImageLocation + BoundingBox + RedactImageRequest + Color + RedactImageResponse + DeidentifyContentRequest + DeidentifyContentResponse + ReidentifyContentRequest + ReidentifyContentResponse + InspectContentRequest + InspectContentResponse + OutputStorageConfig + InfoTypeStats + InspectDataSourceDetails + InfoTypeDescription + ListInfoTypesRequest + ListInfoTypesResponse + RiskAnalysisJobConfig + PrivacyMetric + AnalyzeDataSourceRiskDetails + ValueFrequency + Value + QuoteInfo + DateTime + DeidentifyConfig + PrimitiveTransformation + TimePartConfig + CryptoHashConfig + ReplaceValueConfig + ReplaceWithInfoTypeConfig + RedactConfig + CharsToIgnore + CharacterMaskConfig + FixedSizeBucketingConfig + BucketingConfig + CryptoReplaceFfxFpeConfig + CryptoKey + TransientCryptoKey + UnwrappedCryptoKey + KmsWrappedCryptoKey + DateShiftConfig + InfoTypeTransformations + FieldTransformation + RecordTransformations + RecordSuppression + RecordCondition + TransformationOverview + TransformationSummary + Schedule + InspectTemplate + DeidentifyTemplate + Error + JobTrigger + Action + 
CreateInspectTemplateRequest + UpdateInspectTemplateRequest + GetInspectTemplateRequest + ListInspectTemplatesRequest + ListInspectTemplatesResponse + DeleteInspectTemplateRequest + CreateJobTriggerRequest + UpdateJobTriggerRequest + GetJobTriggerRequest + CreateDlpJobRequest + ListJobTriggersRequest + ListJobTriggersResponse + DeleteJobTriggerRequest + InspectJobConfig + DlpJob + GetDlpJobRequest + ListDlpJobsRequest + ListDlpJobsResponse + CancelDlpJobRequest + DeleteDlpJobRequest + CreateDeidentifyTemplateRequest + UpdateDeidentifyTemplateRequest + GetDeidentifyTemplateRequest + ListDeidentifyTemplatesRequest + ListDeidentifyTemplatesResponse + DeleteDeidentifyTemplateRequest + InfoType + CustomInfoType + FieldId + PartitionId + KindExpression + DatastoreOptions + CloudStorageOptions + BigQueryOptions + StorageConfig + BigQueryKey + DatastoreKey + Key + RecordKey + BigQueryTable +*/ +package dlp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf4 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" +import google_type "google.golang.org/genproto/googleapis/type/date" +import google_type1 "google.golang.org/genproto/googleapis/type/dayofweek" +import google_type2 "google.golang.org/genproto/googleapis/type/timeofday" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Options describing which parts of the provided content should be scanned. +type ContentOption int32 + +const ( + // Includes entire content of a file or a data stream. + ContentOption_CONTENT_UNSPECIFIED ContentOption = 0 + // Text content within the data, excluding any metadata. + ContentOption_CONTENT_TEXT ContentOption = 1 + // Images found in the data. + ContentOption_CONTENT_IMAGE ContentOption = 2 +) + +var ContentOption_name = map[int32]string{ + 0: "CONTENT_UNSPECIFIED", + 1: "CONTENT_TEXT", + 2: "CONTENT_IMAGE", +} +var ContentOption_value = map[string]int32{ + "CONTENT_UNSPECIFIED": 0, + "CONTENT_TEXT": 1, + "CONTENT_IMAGE": 2, +} + +func (x ContentOption) String() string { + return proto.EnumName(ContentOption_name, int32(x)) +} +func (ContentOption) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// Parts of the APIs which use certain infoTypes. +type InfoTypeSupportedBy int32 + +const ( + InfoTypeSupportedBy_ENUM_TYPE_UNSPECIFIED InfoTypeSupportedBy = 0 + // Supported by the inspect operations. + InfoTypeSupportedBy_INSPECT InfoTypeSupportedBy = 1 + // Supported by the risk analysis operations. 
+ InfoTypeSupportedBy_RISK_ANALYSIS InfoTypeSupportedBy = 2 +) + +var InfoTypeSupportedBy_name = map[int32]string{ + 0: "ENUM_TYPE_UNSPECIFIED", + 1: "INSPECT", + 2: "RISK_ANALYSIS", +} +var InfoTypeSupportedBy_value = map[string]int32{ + "ENUM_TYPE_UNSPECIFIED": 0, + "INSPECT": 1, + "RISK_ANALYSIS": 2, +} + +func (x InfoTypeSupportedBy) String() string { + return proto.EnumName(InfoTypeSupportedBy_name, int32(x)) +} +func (InfoTypeSupportedBy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// Operators available for comparing the value of fields. +type RelationalOperator int32 + +const ( + RelationalOperator_RELATIONAL_OPERATOR_UNSPECIFIED RelationalOperator = 0 + // Equal. + RelationalOperator_EQUAL_TO RelationalOperator = 1 + // Not equal to. + RelationalOperator_NOT_EQUAL_TO RelationalOperator = 2 + // Greater than. + RelationalOperator_GREATER_THAN RelationalOperator = 3 + // Less than. + RelationalOperator_LESS_THAN RelationalOperator = 4 + // Greater than or equals. + RelationalOperator_GREATER_THAN_OR_EQUALS RelationalOperator = 5 + // Less than or equals. + RelationalOperator_LESS_THAN_OR_EQUALS RelationalOperator = 6 + // Exists + RelationalOperator_EXISTS RelationalOperator = 7 +) + +var RelationalOperator_name = map[int32]string{ + 0: "RELATIONAL_OPERATOR_UNSPECIFIED", + 1: "EQUAL_TO", + 2: "NOT_EQUAL_TO", + 3: "GREATER_THAN", + 4: "LESS_THAN", + 5: "GREATER_THAN_OR_EQUALS", + 6: "LESS_THAN_OR_EQUALS", + 7: "EXISTS", +} +var RelationalOperator_value = map[string]int32{ + "RELATIONAL_OPERATOR_UNSPECIFIED": 0, + "EQUAL_TO": 1, + "NOT_EQUAL_TO": 2, + "GREATER_THAN": 3, + "LESS_THAN": 4, + "GREATER_THAN_OR_EQUALS": 5, + "LESS_THAN_OR_EQUALS": 6, + "EXISTS": 7, +} + +func (x RelationalOperator) String() string { + return proto.EnumName(RelationalOperator_name, int32(x)) +} +func (RelationalOperator) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// An enum to represent the various type of DLP jobs. 
+type DlpJobType int32 + +const ( + DlpJobType_DLP_JOB_TYPE_UNSPECIFIED DlpJobType = 0 + // The job inspected Google Cloud for sensitive data. + DlpJobType_INSPECT_JOB DlpJobType = 1 + // The job executed a Risk Analysis computation. + DlpJobType_RISK_ANALYSIS_JOB DlpJobType = 2 +) + +var DlpJobType_name = map[int32]string{ + 0: "DLP_JOB_TYPE_UNSPECIFIED", + 1: "INSPECT_JOB", + 2: "RISK_ANALYSIS_JOB", +} +var DlpJobType_value = map[string]int32{ + "DLP_JOB_TYPE_UNSPECIFIED": 0, + "INSPECT_JOB": 1, + "RISK_ANALYSIS_JOB": 2, +} + +func (x DlpJobType) String() string { + return proto.EnumName(DlpJobType_name, int32(x)) +} +func (DlpJobType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type ByteContentItem_BytesType int32 + +const ( + ByteContentItem_BYTES_TYPE_UNSPECIFIED ByteContentItem_BytesType = 0 + ByteContentItem_IMAGE_JPEG ByteContentItem_BytesType = 1 + ByteContentItem_IMAGE_BMP ByteContentItem_BytesType = 2 + ByteContentItem_IMAGE_PNG ByteContentItem_BytesType = 3 + ByteContentItem_IMAGE_SVG ByteContentItem_BytesType = 4 + ByteContentItem_TEXT_UTF8 ByteContentItem_BytesType = 5 +) + +var ByteContentItem_BytesType_name = map[int32]string{ + 0: "BYTES_TYPE_UNSPECIFIED", + 1: "IMAGE_JPEG", + 2: "IMAGE_BMP", + 3: "IMAGE_PNG", + 4: "IMAGE_SVG", + 5: "TEXT_UTF8", +} +var ByteContentItem_BytesType_value = map[string]int32{ + "BYTES_TYPE_UNSPECIFIED": 0, + "IMAGE_JPEG": 1, + "IMAGE_BMP": 2, + "IMAGE_PNG": 3, + "IMAGE_SVG": 4, + "TEXT_UTF8": 5, +} + +func (x ByteContentItem_BytesType) String() string { + return proto.EnumName(ByteContentItem_BytesType_name, int32(x)) +} +func (ByteContentItem_BytesType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// Predefined schemas for storing findings. 
+type OutputStorageConfig_OutputSchema int32 + +const ( + OutputStorageConfig_OUTPUT_SCHEMA_UNSPECIFIED OutputStorageConfig_OutputSchema = 0 + // Basic schema including only `info_type`, `quote`, `certainty`, and + // `timestamp`. + OutputStorageConfig_BASIC_COLUMNS OutputStorageConfig_OutputSchema = 1 + // Schema tailored to findings from scanning Google Cloud Storage. + OutputStorageConfig_GCS_COLUMNS OutputStorageConfig_OutputSchema = 2 + // Schema tailored to findings from scanning Google Datastore. + OutputStorageConfig_DATASTORE_COLUMNS OutputStorageConfig_OutputSchema = 3 + // Schema tailored to findings from scanning Google BigQuery. + OutputStorageConfig_BIG_QUERY_COLUMNS OutputStorageConfig_OutputSchema = 4 + // Schema containing all columns. + OutputStorageConfig_ALL_COLUMNS OutputStorageConfig_OutputSchema = 5 +) + +var OutputStorageConfig_OutputSchema_name = map[int32]string{ + 0: "OUTPUT_SCHEMA_UNSPECIFIED", + 1: "BASIC_COLUMNS", + 2: "GCS_COLUMNS", + 3: "DATASTORE_COLUMNS", + 4: "BIG_QUERY_COLUMNS", + 5: "ALL_COLUMNS", +} +var OutputStorageConfig_OutputSchema_value = map[string]int32{ + "OUTPUT_SCHEMA_UNSPECIFIED": 0, + "BASIC_COLUMNS": 1, + "GCS_COLUMNS": 2, + "DATASTORE_COLUMNS": 3, + "BIG_QUERY_COLUMNS": 4, + "ALL_COLUMNS": 5, +} + +func (x OutputStorageConfig_OutputSchema) String() string { + return proto.EnumName(OutputStorageConfig_OutputSchema_name, int32(x)) +} +func (OutputStorageConfig_OutputSchema) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{23, 0} +} + +type TimePartConfig_TimePart int32 + +const ( + TimePartConfig_TIME_PART_UNSPECIFIED TimePartConfig_TimePart = 0 + // [0-9999] + TimePartConfig_YEAR TimePartConfig_TimePart = 1 + // [1-12] + TimePartConfig_MONTH TimePartConfig_TimePart = 2 + // [1-31] + TimePartConfig_DAY_OF_MONTH TimePartConfig_TimePart = 3 + // [1-7] + TimePartConfig_DAY_OF_WEEK TimePartConfig_TimePart = 4 + // [1-52] + TimePartConfig_WEEK_OF_YEAR TimePartConfig_TimePart = 5 + // [0-23] + 
TimePartConfig_HOUR_OF_DAY TimePartConfig_TimePart = 6 +) + +var TimePartConfig_TimePart_name = map[int32]string{ + 0: "TIME_PART_UNSPECIFIED", + 1: "YEAR", + 2: "MONTH", + 3: "DAY_OF_MONTH", + 4: "DAY_OF_WEEK", + 5: "WEEK_OF_YEAR", + 6: "HOUR_OF_DAY", +} +var TimePartConfig_TimePart_value = map[string]int32{ + "TIME_PART_UNSPECIFIED": 0, + "YEAR": 1, + "MONTH": 2, + "DAY_OF_MONTH": 3, + "DAY_OF_WEEK": 4, + "WEEK_OF_YEAR": 5, + "HOUR_OF_DAY": 6, +} + +func (x TimePartConfig_TimePart) String() string { + return proto.EnumName(TimePartConfig_TimePart_name, int32(x)) +} +func (TimePartConfig_TimePart) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{38, 0} } + +type CharsToIgnore_CommonCharsToIgnore int32 + +const ( + CharsToIgnore_COMMON_CHARS_TO_IGNORE_UNSPECIFIED CharsToIgnore_CommonCharsToIgnore = 0 + // 0-9 + CharsToIgnore_NUMERIC CharsToIgnore_CommonCharsToIgnore = 1 + // A-Z + CharsToIgnore_ALPHA_UPPER_CASE CharsToIgnore_CommonCharsToIgnore = 2 + // a-z + CharsToIgnore_ALPHA_LOWER_CASE CharsToIgnore_CommonCharsToIgnore = 3 + // US Punctuation, one of !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ + CharsToIgnore_PUNCTUATION CharsToIgnore_CommonCharsToIgnore = 4 + // Whitespace character, one of [ \t\n\x0B\f\r] + CharsToIgnore_WHITESPACE CharsToIgnore_CommonCharsToIgnore = 5 +) + +var CharsToIgnore_CommonCharsToIgnore_name = map[int32]string{ + 0: "COMMON_CHARS_TO_IGNORE_UNSPECIFIED", + 1: "NUMERIC", + 2: "ALPHA_UPPER_CASE", + 3: "ALPHA_LOWER_CASE", + 4: "PUNCTUATION", + 5: "WHITESPACE", +} +var CharsToIgnore_CommonCharsToIgnore_value = map[string]int32{ + "COMMON_CHARS_TO_IGNORE_UNSPECIFIED": 0, + "NUMERIC": 1, + "ALPHA_UPPER_CASE": 2, + "ALPHA_LOWER_CASE": 3, + "PUNCTUATION": 4, + "WHITESPACE": 5, +} + +func (x CharsToIgnore_CommonCharsToIgnore) String() string { + return proto.EnumName(CharsToIgnore_CommonCharsToIgnore_name, int32(x)) +} +func (CharsToIgnore_CommonCharsToIgnore) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{43, 0} +} + 
+// These are commonly used subsets of the alphabet that the FFX mode +// natively supports. In the algorithm, the alphabet is selected using +// the "radix". Therefore each corresponds to particular radix. +type CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet int32 + +const ( + CryptoReplaceFfxFpeConfig_FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet = 0 + // [0-9] (radix of 10) + CryptoReplaceFfxFpeConfig_NUMERIC CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet = 1 + // [0-9A-F] (radix of 16) + CryptoReplaceFfxFpeConfig_HEXADECIMAL CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet = 2 + // [0-9A-Z] (radix of 36) + CryptoReplaceFfxFpeConfig_UPPER_CASE_ALPHA_NUMERIC CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet = 3 + // [0-9A-Za-z] (radix of 62) + CryptoReplaceFfxFpeConfig_ALPHA_NUMERIC CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet = 4 +) + +var CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_name = map[int32]string{ + 0: "FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", + 1: "NUMERIC", + 2: "HEXADECIMAL", + 3: "UPPER_CASE_ALPHA_NUMERIC", + 4: "ALPHA_NUMERIC", +} +var CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_value = map[string]int32{ + "FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED": 0, + "NUMERIC": 1, + "HEXADECIMAL": 2, + "UPPER_CASE_ALPHA_NUMERIC": 3, + "ALPHA_NUMERIC": 4, +} + +func (x CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet) String() string { + return proto.EnumName(CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_name, int32(x)) +} +func (CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{47, 0} +} + +type RecordCondition_Expressions_LogicalOperator int32 + +const ( + RecordCondition_Expressions_LOGICAL_OPERATOR_UNSPECIFIED RecordCondition_Expressions_LogicalOperator = 0 + RecordCondition_Expressions_AND RecordCondition_Expressions_LogicalOperator = 1 +) + +var RecordCondition_Expressions_LogicalOperator_name = map[int32]string{ + 0: 
"LOGICAL_OPERATOR_UNSPECIFIED", + 1: "AND", +} +var RecordCondition_Expressions_LogicalOperator_value = map[string]int32{ + "LOGICAL_OPERATOR_UNSPECIFIED": 0, + "AND": 1, +} + +func (x RecordCondition_Expressions_LogicalOperator) String() string { + return proto.EnumName(RecordCondition_Expressions_LogicalOperator_name, int32(x)) +} +func (RecordCondition_Expressions_LogicalOperator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{57, 2, 0} +} + +// Possible outcomes of transformations. +type TransformationSummary_TransformationResultCode int32 + +const ( + TransformationSummary_TRANSFORMATION_RESULT_CODE_UNSPECIFIED TransformationSummary_TransformationResultCode = 0 + TransformationSummary_SUCCESS TransformationSummary_TransformationResultCode = 1 + TransformationSummary_ERROR TransformationSummary_TransformationResultCode = 2 +) + +var TransformationSummary_TransformationResultCode_name = map[int32]string{ + 0: "TRANSFORMATION_RESULT_CODE_UNSPECIFIED", + 1: "SUCCESS", + 2: "ERROR", +} +var TransformationSummary_TransformationResultCode_value = map[string]int32{ + "TRANSFORMATION_RESULT_CODE_UNSPECIFIED": 0, + "SUCCESS": 1, + "ERROR": 2, +} + +func (x TransformationSummary_TransformationResultCode) String() string { + return proto.EnumName(TransformationSummary_TransformationResultCode_name, int32(x)) +} +func (TransformationSummary_TransformationResultCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{59, 0} +} + +// Whether the trigger is currently active. If PAUSED or CANCELLED, no jobs +// will be created with this configuration. The service may automatically +// pause triggers experiencing frequent errors. To restart a job, set the +// status to HEALTHY after correcting user errors. +type JobTrigger_Status int32 + +const ( + JobTrigger_STATUS_UNSPECIFIED JobTrigger_Status = 0 + // Trigger is healthy. + JobTrigger_HEALTHY JobTrigger_Status = 1 + // Trigger is temporarily paused. 
+ JobTrigger_PAUSED JobTrigger_Status = 2 + // Trigger is cancelled and can not be resumed. + JobTrigger_CANCELLED JobTrigger_Status = 3 +) + +var JobTrigger_Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "HEALTHY", + 2: "PAUSED", + 3: "CANCELLED", +} +var JobTrigger_Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "HEALTHY": 1, + "PAUSED": 2, + "CANCELLED": 3, +} + +func (x JobTrigger_Status) String() string { + return proto.EnumName(JobTrigger_Status_name, int32(x)) +} +func (JobTrigger_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{64, 0} } + +type DlpJob_JobState int32 + +const ( + DlpJob_JOB_STATE_UNSPECIFIED DlpJob_JobState = 0 + // The job has not yet started. + DlpJob_PENDING DlpJob_JobState = 1 + // The job is currently running. + DlpJob_RUNNING DlpJob_JobState = 2 + // The job is no longer running. + DlpJob_DONE DlpJob_JobState = 3 + // The job was canceled before it could complete. + DlpJob_CANCELED DlpJob_JobState = 4 + // The job had an error and did not complete. + DlpJob_FAILED DlpJob_JobState = 5 +) + +var DlpJob_JobState_name = map[int32]string{ + 0: "JOB_STATE_UNSPECIFIED", + 1: "PENDING", + 2: "RUNNING", + 3: "DONE", + 4: "CANCELED", + 5: "FAILED", +} +var DlpJob_JobState_value = map[string]int32{ + "JOB_STATE_UNSPECIFIED": 0, + "PENDING": 1, + "RUNNING": 2, + "DONE": 3, + "CANCELED": 4, + "FAILED": 5, +} + +func (x DlpJob_JobState) String() string { + return proto.EnumName(DlpJob_JobState_name, int32(x)) +} +func (DlpJob_JobState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{80, 0} } + +// Configuration description of the scanning process. +// When used with redactContent only info_types and min_likelihood are currently +// used. +type InspectConfig struct { + // Restricts what info_types to look for. The values must correspond to + // InfoType values returned by ListInfoTypes or found in documentation. + // Empty info_types runs all enabled detectors. 
+ InfoTypes []*InfoType `protobuf:"bytes,1,rep,name=info_types,json=infoTypes" json:"info_types,omitempty"` + // Only returns findings equal or above this threshold. The default is + // POSSIBLE. + MinLikelihood Likelihood `protobuf:"varint,2,opt,name=min_likelihood,json=minLikelihood,enum=google.privacy.dlp.v2.Likelihood" json:"min_likelihood,omitempty"` + Limits *InspectConfig_FindingLimits `protobuf:"bytes,3,opt,name=limits" json:"limits,omitempty"` + // When true, a contextual quote from the data that triggered a finding is + // included in the response; see Finding.quote. + IncludeQuote bool `protobuf:"varint,4,opt,name=include_quote,json=includeQuote" json:"include_quote,omitempty"` + // When true, excludes type information of the findings. + ExcludeInfoTypes bool `protobuf:"varint,5,opt,name=exclude_info_types,json=excludeInfoTypes" json:"exclude_info_types,omitempty"` + // Custom infoTypes provided by the user. + CustomInfoTypes []*CustomInfoType `protobuf:"bytes,6,rep,name=custom_info_types,json=customInfoTypes" json:"custom_info_types,omitempty"` + // List of options defining data content to scan. + // If empty, text, images, and other content will be included. 
+ ContentOptions []ContentOption `protobuf:"varint,8,rep,packed,name=content_options,json=contentOptions,enum=google.privacy.dlp.v2.ContentOption" json:"content_options,omitempty"` +} + +func (m *InspectConfig) Reset() { *m = InspectConfig{} } +func (m *InspectConfig) String() string { return proto.CompactTextString(m) } +func (*InspectConfig) ProtoMessage() {} +func (*InspectConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *InspectConfig) GetInfoTypes() []*InfoType { + if m != nil { + return m.InfoTypes + } + return nil +} + +func (m *InspectConfig) GetMinLikelihood() Likelihood { + if m != nil { + return m.MinLikelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *InspectConfig) GetLimits() *InspectConfig_FindingLimits { + if m != nil { + return m.Limits + } + return nil +} + +func (m *InspectConfig) GetIncludeQuote() bool { + if m != nil { + return m.IncludeQuote + } + return false +} + +func (m *InspectConfig) GetExcludeInfoTypes() bool { + if m != nil { + return m.ExcludeInfoTypes + } + return false +} + +func (m *InspectConfig) GetCustomInfoTypes() []*CustomInfoType { + if m != nil { + return m.CustomInfoTypes + } + return nil +} + +func (m *InspectConfig) GetContentOptions() []ContentOption { + if m != nil { + return m.ContentOptions + } + return nil +} + +type InspectConfig_FindingLimits struct { + // Max number of findings that will be returned for each item scanned. + // When set within `InspectDataSourceRequest`, + // the maximum returned is 1000 regardless if this is set higher. + // When set within `InspectContentRequest`, this field is ignored. + MaxFindingsPerItem int32 `protobuf:"varint,1,opt,name=max_findings_per_item,json=maxFindingsPerItem" json:"max_findings_per_item,omitempty"` + // Max number of findings that will be returned per request/job. + // When set within `InspectContentRequest`, the maximum returned is 1000 + // regardless if this is set higher. 
+ MaxFindingsPerRequest int32 `protobuf:"varint,2,opt,name=max_findings_per_request,json=maxFindingsPerRequest" json:"max_findings_per_request,omitempty"` + // Configuration of findings limit given for specified infoTypes. + MaxFindingsPerInfoType []*InspectConfig_FindingLimits_InfoTypeLimit `protobuf:"bytes,3,rep,name=max_findings_per_info_type,json=maxFindingsPerInfoType" json:"max_findings_per_info_type,omitempty"` +} + +func (m *InspectConfig_FindingLimits) Reset() { *m = InspectConfig_FindingLimits{} } +func (m *InspectConfig_FindingLimits) String() string { return proto.CompactTextString(m) } +func (*InspectConfig_FindingLimits) ProtoMessage() {} +func (*InspectConfig_FindingLimits) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +func (m *InspectConfig_FindingLimits) GetMaxFindingsPerItem() int32 { + if m != nil { + return m.MaxFindingsPerItem + } + return 0 +} + +func (m *InspectConfig_FindingLimits) GetMaxFindingsPerRequest() int32 { + if m != nil { + return m.MaxFindingsPerRequest + } + return 0 +} + +func (m *InspectConfig_FindingLimits) GetMaxFindingsPerInfoType() []*InspectConfig_FindingLimits_InfoTypeLimit { + if m != nil { + return m.MaxFindingsPerInfoType + } + return nil +} + +// Max findings configuration per infoType, per content item or long +// running DlpJob. +type InspectConfig_FindingLimits_InfoTypeLimit struct { + // Type of information the findings limit applies to. Only one limit per + // info_type should be provided. If InfoTypeLimit does not have an + // info_type, the DLP API applies the limit against all info_types that + // are found but not specified in another InfoTypeLimit. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Max findings limit for the given infoType. 
+ MaxFindings int32 `protobuf:"varint,2,opt,name=max_findings,json=maxFindings" json:"max_findings,omitempty"` +} + +func (m *InspectConfig_FindingLimits_InfoTypeLimit) Reset() { + *m = InspectConfig_FindingLimits_InfoTypeLimit{} +} +func (m *InspectConfig_FindingLimits_InfoTypeLimit) String() string { return proto.CompactTextString(m) } +func (*InspectConfig_FindingLimits_InfoTypeLimit) ProtoMessage() {} +func (*InspectConfig_FindingLimits_InfoTypeLimit) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{0, 0, 0} +} + +func (m *InspectConfig_FindingLimits_InfoTypeLimit) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *InspectConfig_FindingLimits_InfoTypeLimit) GetMaxFindings() int32 { + if m != nil { + return m.MaxFindings + } + return 0 +} + +// Container for bytes to inspect or redact. +type ByteContentItem struct { + // The type of data stored in the bytes string. Default will be TEXT_UTF8. + Type ByteContentItem_BytesType `protobuf:"varint,1,opt,name=type,enum=google.privacy.dlp.v2.ByteContentItem_BytesType" json:"type,omitempty"` + // Content data to inspect or redact. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *ByteContentItem) Reset() { *m = ByteContentItem{} } +func (m *ByteContentItem) String() string { return proto.CompactTextString(m) } +func (*ByteContentItem) ProtoMessage() {} +func (*ByteContentItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ByteContentItem) GetType() ByteContentItem_BytesType { + if m != nil { + return m.Type + } + return ByteContentItem_BYTES_TYPE_UNSPECIFIED +} + +func (m *ByteContentItem) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// Container structure for the content to inspect. +type ContentItem struct { + // Data of the item either in the byte array or UTF-8 string form, or table. 
+ // + // Types that are valid to be assigned to DataItem: + // *ContentItem_Value + // *ContentItem_Table + // *ContentItem_ByteItem + DataItem isContentItem_DataItem `protobuf_oneof:"data_item"` +} + +func (m *ContentItem) Reset() { *m = ContentItem{} } +func (m *ContentItem) String() string { return proto.CompactTextString(m) } +func (*ContentItem) ProtoMessage() {} +func (*ContentItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isContentItem_DataItem interface { + isContentItem_DataItem() +} + +type ContentItem_Value struct { + Value string `protobuf:"bytes,3,opt,name=value,oneof"` +} +type ContentItem_Table struct { + Table *Table `protobuf:"bytes,4,opt,name=table,oneof"` +} +type ContentItem_ByteItem struct { + ByteItem *ByteContentItem `protobuf:"bytes,5,opt,name=byte_item,json=byteItem,oneof"` +} + +func (*ContentItem_Value) isContentItem_DataItem() {} +func (*ContentItem_Table) isContentItem_DataItem() {} +func (*ContentItem_ByteItem) isContentItem_DataItem() {} + +func (m *ContentItem) GetDataItem() isContentItem_DataItem { + if m != nil { + return m.DataItem + } + return nil +} + +func (m *ContentItem) GetValue() string { + if x, ok := m.GetDataItem().(*ContentItem_Value); ok { + return x.Value + } + return "" +} + +func (m *ContentItem) GetTable() *Table { + if x, ok := m.GetDataItem().(*ContentItem_Table); ok { + return x.Table + } + return nil +} + +func (m *ContentItem) GetByteItem() *ByteContentItem { + if x, ok := m.GetDataItem().(*ContentItem_ByteItem); ok { + return x.ByteItem + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ContentItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ContentItem_OneofMarshaler, _ContentItem_OneofUnmarshaler, _ContentItem_OneofSizer, []interface{}{ + (*ContentItem_Value)(nil), + (*ContentItem_Table)(nil), + (*ContentItem_ByteItem)(nil), + } +} + +func _ContentItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ContentItem) + // data_item + switch x := m.DataItem.(type) { + case *ContentItem_Value: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Value) + case *ContentItem_Table: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Table); err != nil { + return err + } + case *ContentItem_ByteItem: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ByteItem); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ContentItem.DataItem has unexpected type %T", x) + } + return nil +} + +func _ContentItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ContentItem) + switch tag { + case 3: // data_item.value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.DataItem = &ContentItem_Value{x} + return true, err + case 4: // data_item.table + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Table) + err := b.DecodeMessage(msg) + m.DataItem = &ContentItem_Table{msg} + return true, err + case 5: // data_item.byte_item + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ByteContentItem) + err := b.DecodeMessage(msg) + m.DataItem = &ContentItem_ByteItem{msg} + return true, err + default: + return false, nil + } +} + +func _ContentItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ContentItem) + // data_item + 
switch x := m.DataItem.(type) { + case *ContentItem_Value: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Value))) + n += len(x.Value) + case *ContentItem_Table: + s := proto.Size(x.Table) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ContentItem_ByteItem: + s := proto.Size(x.ByteItem) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Structured content to inspect. Up to 50,000 `Value`s per request allowed. +type Table struct { + Headers []*FieldId `protobuf:"bytes,1,rep,name=headers" json:"headers,omitempty"` + Rows []*Table_Row `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` +} + +func (m *Table) Reset() { *m = Table{} } +func (m *Table) String() string { return proto.CompactTextString(m) } +func (*Table) ProtoMessage() {} +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Table) GetHeaders() []*FieldId { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Table) GetRows() []*Table_Row { + if m != nil { + return m.Rows + } + return nil +} + +type Table_Row struct { + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *Table_Row) Reset() { *m = Table_Row{} } +func (m *Table_Row) String() string { return proto.CompactTextString(m) } +func (*Table_Row) ProtoMessage() {} +func (*Table_Row) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +func (m *Table_Row) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// All the findings for a single scanned item. +type InspectResult struct { + // List of findings for an item. 
+ Findings []*Finding `protobuf:"bytes,1,rep,name=findings" json:"findings,omitempty"` + // If true, then this item might have more findings than were returned, + // and the findings returned are an arbitrary subset of all findings. + // The findings list might be truncated because the input items were too + // large, or because the server reached the maximum amount of resources + // allowed for a single API call. For best results, divide the input into + // smaller batches. + FindingsTruncated bool `protobuf:"varint,2,opt,name=findings_truncated,json=findingsTruncated" json:"findings_truncated,omitempty"` +} + +func (m *InspectResult) Reset() { *m = InspectResult{} } +func (m *InspectResult) String() string { return proto.CompactTextString(m) } +func (*InspectResult) ProtoMessage() {} +func (*InspectResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *InspectResult) GetFindings() []*Finding { + if m != nil { + return m.Findings + } + return nil +} + +func (m *InspectResult) GetFindingsTruncated() bool { + if m != nil { + return m.FindingsTruncated + } + return false +} + +// Represents a piece of potentially sensitive content. +type Finding struct { + // The content that was found. Even if the content is not textual, it + // may be converted to a textual representation here. + // Provided if requested by the `InspectConfig` and the finding is + // less than or equal to 4096 bytes long. If the finding exceeds 4096 bytes + // in length, the quote may be omitted. + Quote string `protobuf:"bytes,1,opt,name=quote" json:"quote,omitempty"` + // The type of content that might have been found. + // Provided if requested by the `InspectConfig`. + InfoType *InfoType `protobuf:"bytes,2,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Estimate of how likely it is that the `info_type` is correct. 
+ Likelihood Likelihood `protobuf:"varint,3,opt,name=likelihood,enum=google.privacy.dlp.v2.Likelihood" json:"likelihood,omitempty"` + // Where the content was found. + Location *Location `protobuf:"bytes,4,opt,name=location" json:"location,omitempty"` + // Timestamp when finding was detected. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Contains data parsed from quotes. Only populated if include_quote was set + // to true and a supported infoType was requested. Currently supported + // infoTypes: DATE, DATE_OF_BIRTH and TIME. + QuoteInfo *QuoteInfo `protobuf:"bytes,7,opt,name=quote_info,json=quoteInfo" json:"quote_info,omitempty"` +} + +func (m *Finding) Reset() { *m = Finding{} } +func (m *Finding) String() string { return proto.CompactTextString(m) } +func (*Finding) ProtoMessage() {} +func (*Finding) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Finding) GetQuote() string { + if m != nil { + return m.Quote + } + return "" +} + +func (m *Finding) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *Finding) GetLikelihood() Likelihood { + if m != nil { + return m.Likelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *Finding) GetLocation() *Location { + if m != nil { + return m.Location + } + return nil +} + +func (m *Finding) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *Finding) GetQuoteInfo() *QuoteInfo { + if m != nil { + return m.QuoteInfo + } + return nil +} + +// Specifies the location of the finding. +type Location struct { + // Zero-based byte offsets delimiting the finding. + // These are relative to the finding's containing element. + // Note that when the content is not textual, this references + // the UTF-8 encoded textual representation of the content. + // Omitted if content is an image. 
+ ByteRange *Range `protobuf:"bytes,1,opt,name=byte_range,json=byteRange" json:"byte_range,omitempty"` + // Unicode character offsets delimiting the finding. + // These are relative to the finding's containing element. + // Provided when the content is text. + CodepointRange *Range `protobuf:"bytes,2,opt,name=codepoint_range,json=codepointRange" json:"codepoint_range,omitempty"` + // List of nested objects pointing to the precise location of the finding + // within the file or record. + ContentLocations []*ContentLocation `protobuf:"bytes,7,rep,name=content_locations,json=contentLocations" json:"content_locations,omitempty"` +} + +func (m *Location) Reset() { *m = Location{} } +func (m *Location) String() string { return proto.CompactTextString(m) } +func (*Location) ProtoMessage() {} +func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Location) GetByteRange() *Range { + if m != nil { + return m.ByteRange + } + return nil +} + +func (m *Location) GetCodepointRange() *Range { + if m != nil { + return m.CodepointRange + } + return nil +} + +func (m *Location) GetContentLocations() []*ContentLocation { + if m != nil { + return m.ContentLocations + } + return nil +} + +// Findings container location data. +type ContentLocation struct { + // Name of the container where the finding is located. + // The top level name is the source file name or table name. Nested names + // could be absent if the embedded object has no string identifier + // (for an example an image contained within a document). + ContainerName string `protobuf:"bytes,1,opt,name=container_name,json=containerName" json:"container_name,omitempty"` + // Type of the container within the file with location of the finding. 
+ // + // Types that are valid to be assigned to Location: + // *ContentLocation_RecordLocation + // *ContentLocation_ImageLocation + // *ContentLocation_DocumentLocation + Location isContentLocation_Location `protobuf_oneof:"location"` + // Findings container modification timestamp, if applicable. + // For Google Cloud Storage contains last file modification timestamp. + // For BigQuery table contains last_modified_time property. + // For Datastore - not populated. + ContainerTimestamp *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=container_timestamp,json=containerTimestamp" json:"container_timestamp,omitempty"` + // Findings container version, if available + // ("generation" for Google Cloud Storage). + ContainerVersion string `protobuf:"bytes,7,opt,name=container_version,json=containerVersion" json:"container_version,omitempty"` +} + +func (m *ContentLocation) Reset() { *m = ContentLocation{} } +func (m *ContentLocation) String() string { return proto.CompactTextString(m) } +func (*ContentLocation) ProtoMessage() {} +func (*ContentLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isContentLocation_Location interface { + isContentLocation_Location() +} + +type ContentLocation_RecordLocation struct { + RecordLocation *RecordLocation `protobuf:"bytes,2,opt,name=record_location,json=recordLocation,oneof"` +} +type ContentLocation_ImageLocation struct { + ImageLocation *ImageLocation `protobuf:"bytes,3,opt,name=image_location,json=imageLocation,oneof"` +} +type ContentLocation_DocumentLocation struct { + DocumentLocation *DocumentLocation `protobuf:"bytes,5,opt,name=document_location,json=documentLocation,oneof"` +} + +func (*ContentLocation_RecordLocation) isContentLocation_Location() {} +func (*ContentLocation_ImageLocation) isContentLocation_Location() {} +func (*ContentLocation_DocumentLocation) isContentLocation_Location() {} + +func (m *ContentLocation) GetLocation() isContentLocation_Location { + if m != nil { + 
return m.Location + } + return nil +} + +func (m *ContentLocation) GetContainerName() string { + if m != nil { + return m.ContainerName + } + return "" +} + +func (m *ContentLocation) GetRecordLocation() *RecordLocation { + if x, ok := m.GetLocation().(*ContentLocation_RecordLocation); ok { + return x.RecordLocation + } + return nil +} + +func (m *ContentLocation) GetImageLocation() *ImageLocation { + if x, ok := m.GetLocation().(*ContentLocation_ImageLocation); ok { + return x.ImageLocation + } + return nil +} + +func (m *ContentLocation) GetDocumentLocation() *DocumentLocation { + if x, ok := m.GetLocation().(*ContentLocation_DocumentLocation); ok { + return x.DocumentLocation + } + return nil +} + +func (m *ContentLocation) GetContainerTimestamp() *google_protobuf1.Timestamp { + if m != nil { + return m.ContainerTimestamp + } + return nil +} + +func (m *ContentLocation) GetContainerVersion() string { + if m != nil { + return m.ContainerVersion + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ContentLocation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ContentLocation_OneofMarshaler, _ContentLocation_OneofUnmarshaler, _ContentLocation_OneofSizer, []interface{}{ + (*ContentLocation_RecordLocation)(nil), + (*ContentLocation_ImageLocation)(nil), + (*ContentLocation_DocumentLocation)(nil), + } +} + +func _ContentLocation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ContentLocation) + // location + switch x := m.Location.(type) { + case *ContentLocation_RecordLocation: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RecordLocation); err != nil { + return err + } + case *ContentLocation_ImageLocation: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ImageLocation); err != nil { + return err + } + case *ContentLocation_DocumentLocation: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DocumentLocation); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ContentLocation.Location has unexpected type %T", x) + } + return nil +} + +func _ContentLocation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ContentLocation) + switch tag { + case 2: // location.record_location + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RecordLocation) + err := b.DecodeMessage(msg) + m.Location = &ContentLocation_RecordLocation{msg} + return true, err + case 3: // location.image_location + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ImageLocation) + err := b.DecodeMessage(msg) + m.Location = &ContentLocation_ImageLocation{msg} + return true, err + case 5: // location.document_location + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := 
new(DocumentLocation) + err := b.DecodeMessage(msg) + m.Location = &ContentLocation_DocumentLocation{msg} + return true, err + default: + return false, nil + } +} + +func _ContentLocation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ContentLocation) + // location + switch x := m.Location.(type) { + case *ContentLocation_RecordLocation: + s := proto.Size(x.RecordLocation) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ContentLocation_ImageLocation: + s := proto.Size(x.ImageLocation) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ContentLocation_DocumentLocation: + s := proto.Size(x.DocumentLocation) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Location of a finding within a document. +type DocumentLocation struct { + // Offset of the line, from the beginning of the file, where the finding + // is located. + FileOffset int64 `protobuf:"varint,1,opt,name=file_offset,json=fileOffset" json:"file_offset,omitempty"` +} + +func (m *DocumentLocation) Reset() { *m = DocumentLocation{} } +func (m *DocumentLocation) String() string { return proto.CompactTextString(m) } +func (*DocumentLocation) ProtoMessage() {} +func (*DocumentLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *DocumentLocation) GetFileOffset() int64 { + if m != nil { + return m.FileOffset + } + return 0 +} + +// Location of a finding within a row or record. +type RecordLocation struct { + // Key of the finding. + RecordKey *RecordKey `protobuf:"bytes,1,opt,name=record_key,json=recordKey" json:"record_key,omitempty"` + // Field id of the field containing the finding. + FieldId *FieldId `protobuf:"bytes,2,opt,name=field_id,json=fieldId" json:"field_id,omitempty"` + // Location within a `ContentItem.Table`. 
+ TableLocation *TableLocation `protobuf:"bytes,3,opt,name=table_location,json=tableLocation" json:"table_location,omitempty"` +} + +func (m *RecordLocation) Reset() { *m = RecordLocation{} } +func (m *RecordLocation) String() string { return proto.CompactTextString(m) } +func (*RecordLocation) ProtoMessage() {} +func (*RecordLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *RecordLocation) GetRecordKey() *RecordKey { + if m != nil { + return m.RecordKey + } + return nil +} + +func (m *RecordLocation) GetFieldId() *FieldId { + if m != nil { + return m.FieldId + } + return nil +} + +func (m *RecordLocation) GetTableLocation() *TableLocation { + if m != nil { + return m.TableLocation + } + return nil +} + +// Location of a finding within a table. +type TableLocation struct { + // The zero-based index of the row where the finding is located. + RowIndex int64 `protobuf:"varint,1,opt,name=row_index,json=rowIndex" json:"row_index,omitempty"` +} + +func (m *TableLocation) Reset() { *m = TableLocation{} } +func (m *TableLocation) String() string { return proto.CompactTextString(m) } +func (*TableLocation) ProtoMessage() {} +func (*TableLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *TableLocation) GetRowIndex() int64 { + if m != nil { + return m.RowIndex + } + return 0 +} + +// Generic half-open interval [start, end) +type Range struct { + // Index of the first character of the range (inclusive). + Start int64 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + // Index of the last character of the range (exclusive). 
+ End int64 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` +} + +func (m *Range) Reset() { *m = Range{} } +func (m *Range) String() string { return proto.CompactTextString(m) } +func (*Range) ProtoMessage() {} +func (*Range) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Range) GetStart() int64 { + if m != nil { + return m.Start + } + return 0 +} + +func (m *Range) GetEnd() int64 { + if m != nil { + return m.End + } + return 0 +} + +// Location of the finding within an image. +type ImageLocation struct { + // Bounding boxes locating the pixels within the image containing the finding. + BoundingBoxes []*BoundingBox `protobuf:"bytes,1,rep,name=bounding_boxes,json=boundingBoxes" json:"bounding_boxes,omitempty"` +} + +func (m *ImageLocation) Reset() { *m = ImageLocation{} } +func (m *ImageLocation) String() string { return proto.CompactTextString(m) } +func (*ImageLocation) ProtoMessage() {} +func (*ImageLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *ImageLocation) GetBoundingBoxes() []*BoundingBox { + if m != nil { + return m.BoundingBoxes + } + return nil +} + +// Bounding box encompassing detected text within an image. +type BoundingBox struct { + // Top coordinate of the bounding box. (0,0) is upper left. + Top int32 `protobuf:"varint,1,opt,name=top" json:"top,omitempty"` + // Left coordinate of the bounding box. (0,0) is upper left. + Left int32 `protobuf:"varint,2,opt,name=left" json:"left,omitempty"` + // Width of the bounding box in pixels. + Width int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"` + // Height of the bounding box in pixels. 
+ Height int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"` +} + +func (m *BoundingBox) Reset() { *m = BoundingBox{} } +func (m *BoundingBox) String() string { return proto.CompactTextString(m) } +func (*BoundingBox) ProtoMessage() {} +func (*BoundingBox) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *BoundingBox) GetTop() int32 { + if m != nil { + return m.Top + } + return 0 +} + +func (m *BoundingBox) GetLeft() int32 { + if m != nil { + return m.Left + } + return 0 +} + +func (m *BoundingBox) GetWidth() int32 { + if m != nil { + return m.Width + } + return 0 +} + +func (m *BoundingBox) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +// Request to search for potentially sensitive info in a list of items +// and replace it with a default or provided content. +type RedactImageRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Configuration for the inspector. + InspectConfig *InspectConfig `protobuf:"bytes,2,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // The configuration for specifying what content to redact from images. + ImageRedactionConfigs []*RedactImageRequest_ImageRedactionConfig `protobuf:"bytes,5,rep,name=image_redaction_configs,json=imageRedactionConfigs" json:"image_redaction_configs,omitempty"` + // The content must be PNG, JPEG, SVG or BMP. 
+ ByteItem *ByteContentItem `protobuf:"bytes,7,opt,name=byte_item,json=byteItem" json:"byte_item,omitempty"` +} + +func (m *RedactImageRequest) Reset() { *m = RedactImageRequest{} } +func (m *RedactImageRequest) String() string { return proto.CompactTextString(m) } +func (*RedactImageRequest) ProtoMessage() {} +func (*RedactImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *RedactImageRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *RedactImageRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *RedactImageRequest) GetImageRedactionConfigs() []*RedactImageRequest_ImageRedactionConfig { + if m != nil { + return m.ImageRedactionConfigs + } + return nil +} + +func (m *RedactImageRequest) GetByteItem() *ByteContentItem { + if m != nil { + return m.ByteItem + } + return nil +} + +// Configuration for determining how redaction of images should occur. +type RedactImageRequest_ImageRedactionConfig struct { + // Type of information to redact from images. + // + // Types that are valid to be assigned to Target: + // *RedactImageRequest_ImageRedactionConfig_InfoType + // *RedactImageRequest_ImageRedactionConfig_RedactAllText + Target isRedactImageRequest_ImageRedactionConfig_Target `protobuf_oneof:"target"` + // The color to use when redacting content from an image. If not specified, + // the default is black. 
+ RedactionColor *Color `protobuf:"bytes,3,opt,name=redaction_color,json=redactionColor" json:"redaction_color,omitempty"` +} + +func (m *RedactImageRequest_ImageRedactionConfig) Reset() { + *m = RedactImageRequest_ImageRedactionConfig{} +} +func (m *RedactImageRequest_ImageRedactionConfig) String() string { return proto.CompactTextString(m) } +func (*RedactImageRequest_ImageRedactionConfig) ProtoMessage() {} +func (*RedactImageRequest_ImageRedactionConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{14, 0} +} + +type isRedactImageRequest_ImageRedactionConfig_Target interface { + isRedactImageRequest_ImageRedactionConfig_Target() +} + +type RedactImageRequest_ImageRedactionConfig_InfoType struct { + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType,oneof"` +} +type RedactImageRequest_ImageRedactionConfig_RedactAllText struct { + RedactAllText bool `protobuf:"varint,2,opt,name=redact_all_text,json=redactAllText,oneof"` +} + +func (*RedactImageRequest_ImageRedactionConfig_InfoType) isRedactImageRequest_ImageRedactionConfig_Target() { +} +func (*RedactImageRequest_ImageRedactionConfig_RedactAllText) isRedactImageRequest_ImageRedactionConfig_Target() { +} + +func (m *RedactImageRequest_ImageRedactionConfig) GetTarget() isRedactImageRequest_ImageRedactionConfig_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *RedactImageRequest_ImageRedactionConfig) GetInfoType() *InfoType { + if x, ok := m.GetTarget().(*RedactImageRequest_ImageRedactionConfig_InfoType); ok { + return x.InfoType + } + return nil +} + +func (m *RedactImageRequest_ImageRedactionConfig) GetRedactAllText() bool { + if x, ok := m.GetTarget().(*RedactImageRequest_ImageRedactionConfig_RedactAllText); ok { + return x.RedactAllText + } + return false +} + +func (m *RedactImageRequest_ImageRedactionConfig) GetRedactionColor() *Color { + if m != nil { + return m.RedactionColor + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the 
proto package. +func (*RedactImageRequest_ImageRedactionConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RedactImageRequest_ImageRedactionConfig_OneofMarshaler, _RedactImageRequest_ImageRedactionConfig_OneofUnmarshaler, _RedactImageRequest_ImageRedactionConfig_OneofSizer, []interface{}{ + (*RedactImageRequest_ImageRedactionConfig_InfoType)(nil), + (*RedactImageRequest_ImageRedactionConfig_RedactAllText)(nil), + } +} + +func _RedactImageRequest_ImageRedactionConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RedactImageRequest_ImageRedactionConfig) + // target + switch x := m.Target.(type) { + case *RedactImageRequest_ImageRedactionConfig_InfoType: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InfoType); err != nil { + return err + } + case *RedactImageRequest_ImageRedactionConfig_RedactAllText: + t := uint64(0) + if x.RedactAllText { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("RedactImageRequest_ImageRedactionConfig.Target has unexpected type %T", x) + } + return nil +} + +func _RedactImageRequest_ImageRedactionConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RedactImageRequest_ImageRedactionConfig) + switch tag { + case 1: // target.info_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InfoType) + err := b.DecodeMessage(msg) + m.Target = &RedactImageRequest_ImageRedactionConfig_InfoType{msg} + return true, err + case 2: // target.redact_all_text + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Target = &RedactImageRequest_ImageRedactionConfig_RedactAllText{x != 0} + return true, err + default: + return false, nil + } +} + 
+func _RedactImageRequest_ImageRedactionConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RedactImageRequest_ImageRedactionConfig) + // target + switch x := m.Target.(type) { + case *RedactImageRequest_ImageRedactionConfig_InfoType: + s := proto.Size(x.InfoType) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RedactImageRequest_ImageRedactionConfig_RedactAllText: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Represents a color in the RGB color space. +type Color struct { + // The amount of red in the color as a value in the interval [0, 1]. + Red float32 `protobuf:"fixed32,1,opt,name=red" json:"red,omitempty"` + // The amount of green in the color as a value in the interval [0, 1]. + Green float32 `protobuf:"fixed32,2,opt,name=green" json:"green,omitempty"` + // The amount of blue in the color as a value in the interval [0, 1]. + Blue float32 `protobuf:"fixed32,3,opt,name=blue" json:"blue,omitempty"` +} + +func (m *Color) Reset() { *m = Color{} } +func (m *Color) String() string { return proto.CompactTextString(m) } +func (*Color) ProtoMessage() {} +func (*Color) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Color) GetRed() float32 { + if m != nil { + return m.Red + } + return 0 +} + +func (m *Color) GetGreen() float32 { + if m != nil { + return m.Green + } + return 0 +} + +func (m *Color) GetBlue() float32 { + if m != nil { + return m.Blue + } + return 0 +} + +// Results of redacting an image. +type RedactImageResponse struct { + // The redacted image. The type will be the same as the original image. 
+ RedactedImage []byte `protobuf:"bytes,1,opt,name=redacted_image,json=redactedImage,proto3" json:"redacted_image,omitempty"` + // If an image was being inspected and the InspectConfig's include_quote was + // set to true, then this field will include all text, if any, that was found + // in the image. + ExtractedText string `protobuf:"bytes,2,opt,name=extracted_text,json=extractedText" json:"extracted_text,omitempty"` +} + +func (m *RedactImageResponse) Reset() { *m = RedactImageResponse{} } +func (m *RedactImageResponse) String() string { return proto.CompactTextString(m) } +func (*RedactImageResponse) ProtoMessage() {} +func (*RedactImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *RedactImageResponse) GetRedactedImage() []byte { + if m != nil { + return m.RedactedImage + } + return nil +} + +func (m *RedactImageResponse) GetExtractedText() string { + if m != nil { + return m.ExtractedText + } + return "" +} + +// Request to de-identify a list of items. +type DeidentifyContentRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Configuration for the de-identification of the content item. + // Items specified here will override the template referenced by the + // deidentify_template_name argument. + DeidentifyConfig *DeidentifyConfig `protobuf:"bytes,2,opt,name=deidentify_config,json=deidentifyConfig" json:"deidentify_config,omitempty"` + // Configuration for the inspector. + // Items specified here will override the template referenced by the + // inspect_template_name argument. + InspectConfig *InspectConfig `protobuf:"bytes,3,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // The item to de-identify. Will be treated as text. + Item *ContentItem `protobuf:"bytes,4,opt,name=item" json:"item,omitempty"` + // Optional template to use. 
Any configuration directly specified in + // inspect_config will override those set in the template. Singular fields + // that are set in this request will replace their corresponding fields in the + // template. Repeated fields are appended. Singular sub-messages and groups + // are recursively merged. + InspectTemplateName string `protobuf:"bytes,5,opt,name=inspect_template_name,json=inspectTemplateName" json:"inspect_template_name,omitempty"` + // Optional template to use. Any configuration directly specified in + // deidentify_config will override those set in the template. Singular fields + // that are set in this request will replace their corresponding fields in the + // template. Repeated fields are appended. Singular sub-messages and groups + // are recursively merged. + DeidentifyTemplateName string `protobuf:"bytes,6,opt,name=deidentify_template_name,json=deidentifyTemplateName" json:"deidentify_template_name,omitempty"` +} + +func (m *DeidentifyContentRequest) Reset() { *m = DeidentifyContentRequest{} } +func (m *DeidentifyContentRequest) String() string { return proto.CompactTextString(m) } +func (*DeidentifyContentRequest) ProtoMessage() {} +func (*DeidentifyContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *DeidentifyContentRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *DeidentifyContentRequest) GetDeidentifyConfig() *DeidentifyConfig { + if m != nil { + return m.DeidentifyConfig + } + return nil +} + +func (m *DeidentifyContentRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *DeidentifyContentRequest) GetItem() *ContentItem { + if m != nil { + return m.Item + } + return nil +} + +func (m *DeidentifyContentRequest) GetInspectTemplateName() string { + if m != nil { + return m.InspectTemplateName + } + return "" +} + +func (m *DeidentifyContentRequest) GetDeidentifyTemplateName() string { + if m 
!= nil { + return m.DeidentifyTemplateName + } + return "" +} + +// Results of de-identifying a ContentItem. +type DeidentifyContentResponse struct { + // The de-identified item. + Item *ContentItem `protobuf:"bytes,1,opt,name=item" json:"item,omitempty"` + // An overview of the changes that were made on the `item`. + Overview *TransformationOverview `protobuf:"bytes,2,opt,name=overview" json:"overview,omitempty"` +} + +func (m *DeidentifyContentResponse) Reset() { *m = DeidentifyContentResponse{} } +func (m *DeidentifyContentResponse) String() string { return proto.CompactTextString(m) } +func (*DeidentifyContentResponse) ProtoMessage() {} +func (*DeidentifyContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *DeidentifyContentResponse) GetItem() *ContentItem { + if m != nil { + return m.Item + } + return nil +} + +func (m *DeidentifyContentResponse) GetOverview() *TransformationOverview { + if m != nil { + return m.Overview + } + return nil +} + +// Request to re-identify an item. +type ReidentifyContentRequest struct { + // The parent resource name. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Configuration for the re-identification of the content item. + // This field shares the same proto message type that is used for + // de-identification, however its usage here is for the reversal of the + // previous de-identification. Re-identification is performed by examining + // the transformations used to de-identify the items and executing the + // reverse. This requires that only reversible transformations + // be provided here. The reversible transformations are: + // + // - `CryptoReplaceFfxFpeConfig` + ReidentifyConfig *DeidentifyConfig `protobuf:"bytes,2,opt,name=reidentify_config,json=reidentifyConfig" json:"reidentify_config,omitempty"` + // Configuration for the inspector. 
+ InspectConfig *InspectConfig `protobuf:"bytes,3,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // The item to re-identify. Will be treated as text. + Item *ContentItem `protobuf:"bytes,4,opt,name=item" json:"item,omitempty"` + // Optional template to use. Any configuration directly specified in + // `inspect_config` will override those set in the template. Singular fields + // that are set in this request will replace their corresponding fields in the + // template. Repeated fields are appended. Singular sub-messages and groups + // are recursively merged. + InspectTemplateName string `protobuf:"bytes,5,opt,name=inspect_template_name,json=inspectTemplateName" json:"inspect_template_name,omitempty"` + // Optional template to use. References an instance of `DeidentifyTemplate`. + // Any configuration directly specified in `reidentify_config` or + // `inspect_config` will override those set in the template. Singular fields + // that are set in this request will replace their corresponding fields in the + // template. Repeated fields are appended. Singular sub-messages and groups + // are recursively merged. 
+ ReidentifyTemplateName string `protobuf:"bytes,6,opt,name=reidentify_template_name,json=reidentifyTemplateName" json:"reidentify_template_name,omitempty"` +} + +func (m *ReidentifyContentRequest) Reset() { *m = ReidentifyContentRequest{} } +func (m *ReidentifyContentRequest) String() string { return proto.CompactTextString(m) } +func (*ReidentifyContentRequest) ProtoMessage() {} +func (*ReidentifyContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *ReidentifyContentRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ReidentifyContentRequest) GetReidentifyConfig() *DeidentifyConfig { + if m != nil { + return m.ReidentifyConfig + } + return nil +} + +func (m *ReidentifyContentRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *ReidentifyContentRequest) GetItem() *ContentItem { + if m != nil { + return m.Item + } + return nil +} + +func (m *ReidentifyContentRequest) GetInspectTemplateName() string { + if m != nil { + return m.InspectTemplateName + } + return "" +} + +func (m *ReidentifyContentRequest) GetReidentifyTemplateName() string { + if m != nil { + return m.ReidentifyTemplateName + } + return "" +} + +// Results of re-identifying a item. +type ReidentifyContentResponse struct { + // The re-identified item. + Item *ContentItem `protobuf:"bytes,1,opt,name=item" json:"item,omitempty"` + // An overview of the changes that were made to the `item`. 
+ Overview *TransformationOverview `protobuf:"bytes,2,opt,name=overview" json:"overview,omitempty"` +} + +func (m *ReidentifyContentResponse) Reset() { *m = ReidentifyContentResponse{} } +func (m *ReidentifyContentResponse) String() string { return proto.CompactTextString(m) } +func (*ReidentifyContentResponse) ProtoMessage() {} +func (*ReidentifyContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ReidentifyContentResponse) GetItem() *ContentItem { + if m != nil { + return m.Item + } + return nil +} + +func (m *ReidentifyContentResponse) GetOverview() *TransformationOverview { + if m != nil { + return m.Overview + } + return nil +} + +// Request to search for potentially sensitive info in a ContentItem. +type InspectContentRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Configuration for the inspector. What specified here will override + // the template referenced by the inspect_template_name argument. + InspectConfig *InspectConfig `protobuf:"bytes,2,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // The item to inspect. + Item *ContentItem `protobuf:"bytes,3,opt,name=item" json:"item,omitempty"` + // Optional template to use. Any configuration directly specified in + // inspect_config will override those set in the template. Singular fields + // that are set in this request will replace their corresponding fields in the + // template. Repeated fields are appended. Singular sub-messages and groups + // are recursively merged. 
+ InspectTemplateName string `protobuf:"bytes,4,opt,name=inspect_template_name,json=inspectTemplateName" json:"inspect_template_name,omitempty"` +} + +func (m *InspectContentRequest) Reset() { *m = InspectContentRequest{} } +func (m *InspectContentRequest) String() string { return proto.CompactTextString(m) } +func (*InspectContentRequest) ProtoMessage() {} +func (*InspectContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *InspectContentRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *InspectContentRequest) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *InspectContentRequest) GetItem() *ContentItem { + if m != nil { + return m.Item + } + return nil +} + +func (m *InspectContentRequest) GetInspectTemplateName() string { + if m != nil { + return m.InspectTemplateName + } + return "" +} + +// Results of inspecting an item. +type InspectContentResponse struct { + // The findings. + Result *InspectResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` +} + +func (m *InspectContentResponse) Reset() { *m = InspectContentResponse{} } +func (m *InspectContentResponse) String() string { return proto.CompactTextString(m) } +func (*InspectContentResponse) ProtoMessage() {} +func (*InspectContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *InspectContentResponse) GetResult() *InspectResult { + if m != nil { + return m.Result + } + return nil +} + +// Cloud repository for storing output. +type OutputStorageConfig struct { + // Types that are valid to be assigned to Type: + // *OutputStorageConfig_Table + Type isOutputStorageConfig_Type `protobuf_oneof:"type"` + // Schema used for writing the findings. Columns are derived from the + // `Finding` object. If appending to an existing table, any columns from the + // predefined schema that are missing will be added. 
No columns in the + // existing table will be deleted. + // + // If unspecified, then all available columns will be used for a new table, + // and no changes will be made to an existing table. + OutputSchema OutputStorageConfig_OutputSchema `protobuf:"varint,3,opt,name=output_schema,json=outputSchema,enum=google.privacy.dlp.v2.OutputStorageConfig_OutputSchema" json:"output_schema,omitempty"` +} + +func (m *OutputStorageConfig) Reset() { *m = OutputStorageConfig{} } +func (m *OutputStorageConfig) String() string { return proto.CompactTextString(m) } +func (*OutputStorageConfig) ProtoMessage() {} +func (*OutputStorageConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +type isOutputStorageConfig_Type interface { + isOutputStorageConfig_Type() +} + +type OutputStorageConfig_Table struct { + Table *BigQueryTable `protobuf:"bytes,1,opt,name=table,oneof"` +} + +func (*OutputStorageConfig_Table) isOutputStorageConfig_Type() {} + +func (m *OutputStorageConfig) GetType() isOutputStorageConfig_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *OutputStorageConfig) GetTable() *BigQueryTable { + if x, ok := m.GetType().(*OutputStorageConfig_Table); ok { + return x.Table + } + return nil +} + +func (m *OutputStorageConfig) GetOutputSchema() OutputStorageConfig_OutputSchema { + if m != nil { + return m.OutputSchema + } + return OutputStorageConfig_OUTPUT_SCHEMA_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*OutputStorageConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _OutputStorageConfig_OneofMarshaler, _OutputStorageConfig_OneofUnmarshaler, _OutputStorageConfig_OneofSizer, []interface{}{ + (*OutputStorageConfig_Table)(nil), + } +} + +func _OutputStorageConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*OutputStorageConfig) + // type + switch x := m.Type.(type) { + case *OutputStorageConfig_Table: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Table); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("OutputStorageConfig.Type has unexpected type %T", x) + } + return nil +} + +func _OutputStorageConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*OutputStorageConfig) + switch tag { + case 1: // type.table + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BigQueryTable) + err := b.DecodeMessage(msg) + m.Type = &OutputStorageConfig_Table{msg} + return true, err + default: + return false, nil + } +} + +func _OutputStorageConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*OutputStorageConfig) + // type + switch x := m.Type.(type) { + case *OutputStorageConfig_Table: + s := proto.Size(x.Table) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Statistics regarding a specific InfoType. +type InfoTypeStats struct { + // The type of finding this stat is for. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Number of findings for this infoType. 
+ Count int64 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` +} + +func (m *InfoTypeStats) Reset() { *m = InfoTypeStats{} } +func (m *InfoTypeStats) String() string { return proto.CompactTextString(m) } +func (*InfoTypeStats) ProtoMessage() {} +func (*InfoTypeStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *InfoTypeStats) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *InfoTypeStats) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +// The results of an inspect DataSource job. +type InspectDataSourceDetails struct { + // The configuration used for this job. + RequestedOptions *InspectDataSourceDetails_RequestedOptions `protobuf:"bytes,2,opt,name=requested_options,json=requestedOptions" json:"requested_options,omitempty"` + // A summary of the outcome of this inspect job. + Result *InspectDataSourceDetails_Result `protobuf:"bytes,3,opt,name=result" json:"result,omitempty"` +} + +func (m *InspectDataSourceDetails) Reset() { *m = InspectDataSourceDetails{} } +func (m *InspectDataSourceDetails) String() string { return proto.CompactTextString(m) } +func (*InspectDataSourceDetails) ProtoMessage() {} +func (*InspectDataSourceDetails) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *InspectDataSourceDetails) GetRequestedOptions() *InspectDataSourceDetails_RequestedOptions { + if m != nil { + return m.RequestedOptions + } + return nil +} + +func (m *InspectDataSourceDetails) GetResult() *InspectDataSourceDetails_Result { + if m != nil { + return m.Result + } + return nil +} + +type InspectDataSourceDetails_RequestedOptions struct { + // If run with an inspect template, a snapshot of it's state at the time of + // this run. 
+ SnapshotInspectTemplate *InspectTemplate `protobuf:"bytes,1,opt,name=snapshot_inspect_template,json=snapshotInspectTemplate" json:"snapshot_inspect_template,omitempty"` + JobConfig *InspectJobConfig `protobuf:"bytes,3,opt,name=job_config,json=jobConfig" json:"job_config,omitempty"` +} + +func (m *InspectDataSourceDetails_RequestedOptions) Reset() { + *m = InspectDataSourceDetails_RequestedOptions{} +} +func (m *InspectDataSourceDetails_RequestedOptions) String() string { return proto.CompactTextString(m) } +func (*InspectDataSourceDetails_RequestedOptions) ProtoMessage() {} +func (*InspectDataSourceDetails_RequestedOptions) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{25, 0} +} + +func (m *InspectDataSourceDetails_RequestedOptions) GetSnapshotInspectTemplate() *InspectTemplate { + if m != nil { + return m.SnapshotInspectTemplate + } + return nil +} + +func (m *InspectDataSourceDetails_RequestedOptions) GetJobConfig() *InspectJobConfig { + if m != nil { + return m.JobConfig + } + return nil +} + +type InspectDataSourceDetails_Result struct { + // Total size in bytes that were processed. + ProcessedBytes int64 `protobuf:"varint,1,opt,name=processed_bytes,json=processedBytes" json:"processed_bytes,omitempty"` + // Estimate of the number of bytes to process. + TotalEstimatedBytes int64 `protobuf:"varint,2,opt,name=total_estimated_bytes,json=totalEstimatedBytes" json:"total_estimated_bytes,omitempty"` + // Statistics of how many instances of each info type were found during + // inspect job. 
+ InfoTypeStats []*InfoTypeStats `protobuf:"bytes,3,rep,name=info_type_stats,json=infoTypeStats" json:"info_type_stats,omitempty"` +} + +func (m *InspectDataSourceDetails_Result) Reset() { *m = InspectDataSourceDetails_Result{} } +func (m *InspectDataSourceDetails_Result) String() string { return proto.CompactTextString(m) } +func (*InspectDataSourceDetails_Result) ProtoMessage() {} +func (*InspectDataSourceDetails_Result) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{25, 1} +} + +func (m *InspectDataSourceDetails_Result) GetProcessedBytes() int64 { + if m != nil { + return m.ProcessedBytes + } + return 0 +} + +func (m *InspectDataSourceDetails_Result) GetTotalEstimatedBytes() int64 { + if m != nil { + return m.TotalEstimatedBytes + } + return 0 +} + +func (m *InspectDataSourceDetails_Result) GetInfoTypeStats() []*InfoTypeStats { + if m != nil { + return m.InfoTypeStats + } + return nil +} + +// InfoType description. +type InfoTypeDescription struct { + // Internal name of the infoType. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Human readable form of the infoType name. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Which parts of the API supports this InfoType. 
+ SupportedBy []InfoTypeSupportedBy `protobuf:"varint,3,rep,packed,name=supported_by,json=supportedBy,enum=google.privacy.dlp.v2.InfoTypeSupportedBy" json:"supported_by,omitempty"` +} + +func (m *InfoTypeDescription) Reset() { *m = InfoTypeDescription{} } +func (m *InfoTypeDescription) String() string { return proto.CompactTextString(m) } +func (*InfoTypeDescription) ProtoMessage() {} +func (*InfoTypeDescription) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *InfoTypeDescription) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InfoTypeDescription) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *InfoTypeDescription) GetSupportedBy() []InfoTypeSupportedBy { + if m != nil { + return m.SupportedBy + } + return nil +} + +// Request for the list of infoTypes. +type ListInfoTypesRequest struct { + // Optional BCP-47 language code for localized infoType friendly + // names. If omitted, or if localized strings are not available, + // en-US strings will be returned. + LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // Optional filter to only return infoTypes supported by certain parts of the + // API. Defaults to supported_by=INSPECT. + Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListInfoTypesRequest) Reset() { *m = ListInfoTypesRequest{} } +func (m *ListInfoTypesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInfoTypesRequest) ProtoMessage() {} +func (*ListInfoTypesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *ListInfoTypesRequest) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *ListInfoTypesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// Response to the ListInfoTypes request. 
+type ListInfoTypesResponse struct { + // Set of sensitive infoTypes. + InfoTypes []*InfoTypeDescription `protobuf:"bytes,1,rep,name=info_types,json=infoTypes" json:"info_types,omitempty"` +} + +func (m *ListInfoTypesResponse) Reset() { *m = ListInfoTypesResponse{} } +func (m *ListInfoTypesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInfoTypesResponse) ProtoMessage() {} +func (*ListInfoTypesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *ListInfoTypesResponse) GetInfoTypes() []*InfoTypeDescription { + if m != nil { + return m.InfoTypes + } + return nil +} + +// Configuration for a risk analysis job. +type RiskAnalysisJobConfig struct { + // Privacy metric to compute. + PrivacyMetric *PrivacyMetric `protobuf:"bytes,1,opt,name=privacy_metric,json=privacyMetric" json:"privacy_metric,omitempty"` + // Input dataset to compute metrics over. + SourceTable *BigQueryTable `protobuf:"bytes,2,opt,name=source_table,json=sourceTable" json:"source_table,omitempty"` + // Actions to execute at the completion of the job. Are executed in the order + // provided. + Actions []*Action `protobuf:"bytes,3,rep,name=actions" json:"actions,omitempty"` +} + +func (m *RiskAnalysisJobConfig) Reset() { *m = RiskAnalysisJobConfig{} } +func (m *RiskAnalysisJobConfig) String() string { return proto.CompactTextString(m) } +func (*RiskAnalysisJobConfig) ProtoMessage() {} +func (*RiskAnalysisJobConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *RiskAnalysisJobConfig) GetPrivacyMetric() *PrivacyMetric { + if m != nil { + return m.PrivacyMetric + } + return nil +} + +func (m *RiskAnalysisJobConfig) GetSourceTable() *BigQueryTable { + if m != nil { + return m.SourceTable + } + return nil +} + +func (m *RiskAnalysisJobConfig) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +// Privacy metric to compute for reidentification risk analysis. 
+type PrivacyMetric struct { + // Types that are valid to be assigned to Type: + // *PrivacyMetric_NumericalStatsConfig_ + // *PrivacyMetric_CategoricalStatsConfig_ + // *PrivacyMetric_KAnonymityConfig_ + // *PrivacyMetric_LDiversityConfig_ + // *PrivacyMetric_KMapEstimationConfig_ + Type isPrivacyMetric_Type `protobuf_oneof:"type"` +} + +func (m *PrivacyMetric) Reset() { *m = PrivacyMetric{} } +func (m *PrivacyMetric) String() string { return proto.CompactTextString(m) } +func (*PrivacyMetric) ProtoMessage() {} +func (*PrivacyMetric) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isPrivacyMetric_Type interface { + isPrivacyMetric_Type() +} + +type PrivacyMetric_NumericalStatsConfig_ struct { + NumericalStatsConfig *PrivacyMetric_NumericalStatsConfig `protobuf:"bytes,1,opt,name=numerical_stats_config,json=numericalStatsConfig,oneof"` +} +type PrivacyMetric_CategoricalStatsConfig_ struct { + CategoricalStatsConfig *PrivacyMetric_CategoricalStatsConfig `protobuf:"bytes,2,opt,name=categorical_stats_config,json=categoricalStatsConfig,oneof"` +} +type PrivacyMetric_KAnonymityConfig_ struct { + KAnonymityConfig *PrivacyMetric_KAnonymityConfig `protobuf:"bytes,3,opt,name=k_anonymity_config,json=kAnonymityConfig,oneof"` +} +type PrivacyMetric_LDiversityConfig_ struct { + LDiversityConfig *PrivacyMetric_LDiversityConfig `protobuf:"bytes,4,opt,name=l_diversity_config,json=lDiversityConfig,oneof"` +} +type PrivacyMetric_KMapEstimationConfig_ struct { + KMapEstimationConfig *PrivacyMetric_KMapEstimationConfig `protobuf:"bytes,5,opt,name=k_map_estimation_config,json=kMapEstimationConfig,oneof"` +} + +func (*PrivacyMetric_NumericalStatsConfig_) isPrivacyMetric_Type() {} +func (*PrivacyMetric_CategoricalStatsConfig_) isPrivacyMetric_Type() {} +func (*PrivacyMetric_KAnonymityConfig_) isPrivacyMetric_Type() {} +func (*PrivacyMetric_LDiversityConfig_) isPrivacyMetric_Type() {} +func (*PrivacyMetric_KMapEstimationConfig_) isPrivacyMetric_Type() {} + +func 
(m *PrivacyMetric) GetType() isPrivacyMetric_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *PrivacyMetric) GetNumericalStatsConfig() *PrivacyMetric_NumericalStatsConfig { + if x, ok := m.GetType().(*PrivacyMetric_NumericalStatsConfig_); ok { + return x.NumericalStatsConfig + } + return nil +} + +func (m *PrivacyMetric) GetCategoricalStatsConfig() *PrivacyMetric_CategoricalStatsConfig { + if x, ok := m.GetType().(*PrivacyMetric_CategoricalStatsConfig_); ok { + return x.CategoricalStatsConfig + } + return nil +} + +func (m *PrivacyMetric) GetKAnonymityConfig() *PrivacyMetric_KAnonymityConfig { + if x, ok := m.GetType().(*PrivacyMetric_KAnonymityConfig_); ok { + return x.KAnonymityConfig + } + return nil +} + +func (m *PrivacyMetric) GetLDiversityConfig() *PrivacyMetric_LDiversityConfig { + if x, ok := m.GetType().(*PrivacyMetric_LDiversityConfig_); ok { + return x.LDiversityConfig + } + return nil +} + +func (m *PrivacyMetric) GetKMapEstimationConfig() *PrivacyMetric_KMapEstimationConfig { + if x, ok := m.GetType().(*PrivacyMetric_KMapEstimationConfig_); ok { + return x.KMapEstimationConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PrivacyMetric) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PrivacyMetric_OneofMarshaler, _PrivacyMetric_OneofUnmarshaler, _PrivacyMetric_OneofSizer, []interface{}{ + (*PrivacyMetric_NumericalStatsConfig_)(nil), + (*PrivacyMetric_CategoricalStatsConfig_)(nil), + (*PrivacyMetric_KAnonymityConfig_)(nil), + (*PrivacyMetric_LDiversityConfig_)(nil), + (*PrivacyMetric_KMapEstimationConfig_)(nil), + } +} + +func _PrivacyMetric_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PrivacyMetric) + // type + switch x := m.Type.(type) { + case *PrivacyMetric_NumericalStatsConfig_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NumericalStatsConfig); err != nil { + return err + } + case *PrivacyMetric_CategoricalStatsConfig_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CategoricalStatsConfig); err != nil { + return err + } + case *PrivacyMetric_KAnonymityConfig_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KAnonymityConfig); err != nil { + return err + } + case *PrivacyMetric_LDiversityConfig_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LDiversityConfig); err != nil { + return err + } + case *PrivacyMetric_KMapEstimationConfig_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KMapEstimationConfig); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PrivacyMetric.Type has unexpected type %T", x) + } + return nil +} + +func _PrivacyMetric_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PrivacyMetric) + switch tag { + case 1: // type.numerical_stats_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PrivacyMetric_NumericalStatsConfig) + err := 
b.DecodeMessage(msg) + m.Type = &PrivacyMetric_NumericalStatsConfig_{msg} + return true, err + case 2: // type.categorical_stats_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PrivacyMetric_CategoricalStatsConfig) + err := b.DecodeMessage(msg) + m.Type = &PrivacyMetric_CategoricalStatsConfig_{msg} + return true, err + case 3: // type.k_anonymity_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PrivacyMetric_KAnonymityConfig) + err := b.DecodeMessage(msg) + m.Type = &PrivacyMetric_KAnonymityConfig_{msg} + return true, err + case 4: // type.l_diversity_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PrivacyMetric_LDiversityConfig) + err := b.DecodeMessage(msg) + m.Type = &PrivacyMetric_LDiversityConfig_{msg} + return true, err + case 5: // type.k_map_estimation_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PrivacyMetric_KMapEstimationConfig) + err := b.DecodeMessage(msg) + m.Type = &PrivacyMetric_KMapEstimationConfig_{msg} + return true, err + default: + return false, nil + } +} + +func _PrivacyMetric_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PrivacyMetric) + // type + switch x := m.Type.(type) { + case *PrivacyMetric_NumericalStatsConfig_: + s := proto.Size(x.NumericalStatsConfig) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrivacyMetric_CategoricalStatsConfig_: + s := proto.Size(x.CategoricalStatsConfig) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrivacyMetric_KAnonymityConfig_: + s := proto.Size(x.KAnonymityConfig) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrivacyMetric_LDiversityConfig_: + s := proto.Size(x.LDiversityConfig) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *PrivacyMetric_KMapEstimationConfig_: + s := proto.Size(x.KMapEstimationConfig) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Compute numerical stats over an individual column, including +// min, max, and quantiles. +type PrivacyMetric_NumericalStatsConfig struct { + // Field to compute numerical stats on. Supported types are + // integer, float, date, datetime, timestamp, time. + Field *FieldId `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` +} + +func (m *PrivacyMetric_NumericalStatsConfig) Reset() { *m = PrivacyMetric_NumericalStatsConfig{} } +func (m *PrivacyMetric_NumericalStatsConfig) String() string { return proto.CompactTextString(m) } +func (*PrivacyMetric_NumericalStatsConfig) ProtoMessage() {} +func (*PrivacyMetric_NumericalStatsConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 0} +} + +func (m *PrivacyMetric_NumericalStatsConfig) GetField() *FieldId { + if m != nil { + return m.Field + } + return nil +} + +// Compute numerical stats over an individual column, including +// number of distinct values and value count distribution. +type PrivacyMetric_CategoricalStatsConfig struct { + // Field to compute categorical stats on. All column types are + // supported except for arrays and structs. However, it may be more + // informative to use NumericalStats when the field type is supported, + // depending on the data. 
+ Field *FieldId `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` +} + +func (m *PrivacyMetric_CategoricalStatsConfig) Reset() { *m = PrivacyMetric_CategoricalStatsConfig{} } +func (m *PrivacyMetric_CategoricalStatsConfig) String() string { return proto.CompactTextString(m) } +func (*PrivacyMetric_CategoricalStatsConfig) ProtoMessage() {} +func (*PrivacyMetric_CategoricalStatsConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 1} +} + +func (m *PrivacyMetric_CategoricalStatsConfig) GetField() *FieldId { + if m != nil { + return m.Field + } + return nil +} + +// k-anonymity metric, used for analysis of reidentification risk. +type PrivacyMetric_KAnonymityConfig struct { + // Set of fields to compute k-anonymity over. When multiple fields are + // specified, they are considered a single composite key. Structs and + // repeated data types are not supported; however, nested fields are + // supported so long as they are not structs themselves or nested within + // a repeated field. + QuasiIds []*FieldId `protobuf:"bytes,1,rep,name=quasi_ids,json=quasiIds" json:"quasi_ids,omitempty"` +} + +func (m *PrivacyMetric_KAnonymityConfig) Reset() { *m = PrivacyMetric_KAnonymityConfig{} } +func (m *PrivacyMetric_KAnonymityConfig) String() string { return proto.CompactTextString(m) } +func (*PrivacyMetric_KAnonymityConfig) ProtoMessage() {} +func (*PrivacyMetric_KAnonymityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 2} +} + +func (m *PrivacyMetric_KAnonymityConfig) GetQuasiIds() []*FieldId { + if m != nil { + return m.QuasiIds + } + return nil +} + +// l-diversity metric, used for analysis of reidentification risk. +type PrivacyMetric_LDiversityConfig struct { + // Set of quasi-identifiers indicating how equivalence classes are + // defined for the l-diversity computation. When multiple fields are + // specified, they are considered a single composite key. 
+ QuasiIds []*FieldId `protobuf:"bytes,1,rep,name=quasi_ids,json=quasiIds" json:"quasi_ids,omitempty"` + // Sensitive field for computing the l-value. + SensitiveAttribute *FieldId `protobuf:"bytes,2,opt,name=sensitive_attribute,json=sensitiveAttribute" json:"sensitive_attribute,omitempty"` +} + +func (m *PrivacyMetric_LDiversityConfig) Reset() { *m = PrivacyMetric_LDiversityConfig{} } +func (m *PrivacyMetric_LDiversityConfig) String() string { return proto.CompactTextString(m) } +func (*PrivacyMetric_LDiversityConfig) ProtoMessage() {} +func (*PrivacyMetric_LDiversityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 3} +} + +func (m *PrivacyMetric_LDiversityConfig) GetQuasiIds() []*FieldId { + if m != nil { + return m.QuasiIds + } + return nil +} + +func (m *PrivacyMetric_LDiversityConfig) GetSensitiveAttribute() *FieldId { + if m != nil { + return m.SensitiveAttribute + } + return nil +} + +// Reidentifiability metric. This corresponds to a risk model similar to what +// is called "journalist risk" in the literature, except the attack dataset is +// statistically modeled instead of being perfectly known. This can be done +// using publicly available data (like the US Census), or using a custom +// statistical model (indicated as one or several BigQuery tables), or by +// extrapolating from the distribution of values in the input dataset. +type PrivacyMetric_KMapEstimationConfig struct { + // Fields considered to be quasi-identifiers. No two columns can have the + // same tag. [required] + QuasiIds []*PrivacyMetric_KMapEstimationConfig_TaggedField `protobuf:"bytes,1,rep,name=quasi_ids,json=quasiIds" json:"quasi_ids,omitempty"` + // ISO 3166-1 alpha-2 region code to use in the statistical modeling. + // Required if no column is tagged with a region-specific InfoType (like + // US_ZIP_5) or a region code. 
+ RegionCode string `protobuf:"bytes,2,opt,name=region_code,json=regionCode" json:"region_code,omitempty"` + // Several auxiliary tables can be used in the analysis. Each custom_tag + // used to tag a quasi-identifiers column must appear in exactly one column + // of one auxiliary table. + AuxiliaryTables []*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable `protobuf:"bytes,3,rep,name=auxiliary_tables,json=auxiliaryTables" json:"auxiliary_tables,omitempty"` +} + +func (m *PrivacyMetric_KMapEstimationConfig) Reset() { *m = PrivacyMetric_KMapEstimationConfig{} } +func (m *PrivacyMetric_KMapEstimationConfig) String() string { return proto.CompactTextString(m) } +func (*PrivacyMetric_KMapEstimationConfig) ProtoMessage() {} +func (*PrivacyMetric_KMapEstimationConfig) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 4} +} + +func (m *PrivacyMetric_KMapEstimationConfig) GetQuasiIds() []*PrivacyMetric_KMapEstimationConfig_TaggedField { + if m != nil { + return m.QuasiIds + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig) GetRegionCode() string { + if m != nil { + return m.RegionCode + } + return "" +} + +func (m *PrivacyMetric_KMapEstimationConfig) GetAuxiliaryTables() []*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable { + if m != nil { + return m.AuxiliaryTables + } + return nil +} + +// A column with a semantic tag attached. +type PrivacyMetric_KMapEstimationConfig_TaggedField struct { + // Identifies the column. [required] + Field *FieldId `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` + // Semantic tag that identifies what a column contains, to determine which + // statistical model to use to estimate the reidentifiability of each + // value. 
[required] + // + // Types that are valid to be assigned to Tag: + // *PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType + // *PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag + // *PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred + Tag isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag `protobuf_oneof:"tag"` +} + +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) Reset() { + *m = PrivacyMetric_KMapEstimationConfig_TaggedField{} +} +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) String() string { + return proto.CompactTextString(m) +} +func (*PrivacyMetric_KMapEstimationConfig_TaggedField) ProtoMessage() {} +func (*PrivacyMetric_KMapEstimationConfig_TaggedField) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 4, 0} +} + +type isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag interface { + isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag() +} + +type PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType struct { + InfoType *InfoType `protobuf:"bytes,2,opt,name=info_type,json=infoType,oneof"` +} +type PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag struct { + CustomTag string `protobuf:"bytes,3,opt,name=custom_tag,json=customTag,oneof"` +} +type PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred struct { + Inferred *google_protobuf3.Empty `protobuf:"bytes,4,opt,name=inferred,oneof"` +} + +func (*PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType) isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag() { +} +func (*PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag) isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag() { +} +func (*PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred) isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag() { +} + +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) GetTag() isPrivacyMetric_KMapEstimationConfig_TaggedField_Tag { + if m != nil { + return m.Tag + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) 
GetField() *FieldId { + if m != nil { + return m.Field + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) GetInfoType() *InfoType { + if x, ok := m.GetTag().(*PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType); ok { + return x.InfoType + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) GetCustomTag() string { + if x, ok := m.GetTag().(*PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag); ok { + return x.CustomTag + } + return "" +} + +func (m *PrivacyMetric_KMapEstimationConfig_TaggedField) GetInferred() *google_protobuf3.Empty { + if x, ok := m.GetTag().(*PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred); ok { + return x.Inferred + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*PrivacyMetric_KMapEstimationConfig_TaggedField) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PrivacyMetric_KMapEstimationConfig_TaggedField_OneofMarshaler, _PrivacyMetric_KMapEstimationConfig_TaggedField_OneofUnmarshaler, _PrivacyMetric_KMapEstimationConfig_TaggedField_OneofSizer, []interface{}{ + (*PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType)(nil), + (*PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag)(nil), + (*PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred)(nil), + } +} + +func _PrivacyMetric_KMapEstimationConfig_TaggedField_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PrivacyMetric_KMapEstimationConfig_TaggedField) + // tag + switch x := m.Tag.(type) { + case *PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InfoType); err != nil { + return err + } + case *PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag: + b.EncodeVarint(3<<3 | proto.WireBytes) + 
b.EncodeStringBytes(x.CustomTag) + case *PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Inferred); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PrivacyMetric_KMapEstimationConfig_TaggedField.Tag has unexpected type %T", x) + } + return nil +} + +func _PrivacyMetric_KMapEstimationConfig_TaggedField_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PrivacyMetric_KMapEstimationConfig_TaggedField) + switch tag { + case 2: // tag.info_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InfoType) + err := b.DecodeMessage(msg) + m.Tag = &PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType{msg} + return true, err + case 3: // tag.custom_tag + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Tag = &PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag{x} + return true, err + case 4: // tag.inferred + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Empty) + err := b.DecodeMessage(msg) + m.Tag = &PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred{msg} + return true, err + default: + return false, nil + } +} + +func _PrivacyMetric_KMapEstimationConfig_TaggedField_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PrivacyMetric_KMapEstimationConfig_TaggedField) + // tag + switch x := m.Tag.(type) { + case *PrivacyMetric_KMapEstimationConfig_TaggedField_InfoType: + s := proto.Size(x.InfoType) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrivacyMetric_KMapEstimationConfig_TaggedField_CustomTag: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.CustomTag))) + n += len(x.CustomTag) + case *PrivacyMetric_KMapEstimationConfig_TaggedField_Inferred: + s := 
proto.Size(x.Inferred) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// An auxiliary table contains statistical information on the relative +// frequency of different quasi-identifiers values. It has one or several +// quasi-identifiers columns, and one column that indicates the relative +// frequency of each quasi-identifier tuple. +// If a tuple is present in the data but not in the auxiliary table, the +// corresponding relative frequency is assumed to be zero (and thus, the +// tuple is highly reidentifiable). +type PrivacyMetric_KMapEstimationConfig_AuxiliaryTable struct { + // Auxiliary table location. [required] + Table *BigQueryTable `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"` + // Quasi-identifier columns. [required] + QuasiIds []*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField `protobuf:"bytes,1,rep,name=quasi_ids,json=quasiIds" json:"quasi_ids,omitempty"` + // The relative frequency column must contain a floating-point number + // between 0 and 1 (inclusive). Null values are assumed to be zero. 
+ // [required] + RelativeFrequency *FieldId `protobuf:"bytes,2,opt,name=relative_frequency,json=relativeFrequency" json:"relative_frequency,omitempty"` +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) Reset() { + *m = PrivacyMetric_KMapEstimationConfig_AuxiliaryTable{} +} +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) String() string { + return proto.CompactTextString(m) +} +func (*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) ProtoMessage() {} +func (*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 4, 1} +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) GetTable() *BigQueryTable { + if m != nil { + return m.Table + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) GetQuasiIds() []*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField { + if m != nil { + return m.QuasiIds + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable) GetRelativeFrequency() *FieldId { + if m != nil { + return m.RelativeFrequency + } + return nil +} + +// A quasi-identifier column has a custom_tag, used to know which column +// in the data corresponds to which column in the statistical model. 
+type PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField struct { + Field *FieldId `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` + CustomTag string `protobuf:"bytes,2,opt,name=custom_tag,json=customTag" json:"custom_tag,omitempty"` +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField) Reset() { + *m = PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField{} +} +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField) String() string { + return proto.CompactTextString(m) +} +func (*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField) ProtoMessage() {} +func (*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{30, 4, 1, 0} +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField) GetField() *FieldId { + if m != nil { + return m.Field + } + return nil +} + +func (m *PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField) GetCustomTag() string { + if m != nil { + return m.CustomTag + } + return "" +} + +// Result of a risk analysis operation request. +type AnalyzeDataSourceRiskDetails struct { + // Privacy metric to compute. + RequestedPrivacyMetric *PrivacyMetric `protobuf:"bytes,1,opt,name=requested_privacy_metric,json=requestedPrivacyMetric" json:"requested_privacy_metric,omitempty"` + // Input dataset to compute metrics over. + RequestedSourceTable *BigQueryTable `protobuf:"bytes,2,opt,name=requested_source_table,json=requestedSourceTable" json:"requested_source_table,omitempty"` + // Values associated with this metric. 
+ // + // Types that are valid to be assigned to Result: + // *AnalyzeDataSourceRiskDetails_NumericalStatsResult_ + // *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_ + // *AnalyzeDataSourceRiskDetails_KAnonymityResult_ + // *AnalyzeDataSourceRiskDetails_LDiversityResult_ + // *AnalyzeDataSourceRiskDetails_KMapEstimationResult_ + Result isAnalyzeDataSourceRiskDetails_Result `protobuf_oneof:"result"` +} + +func (m *AnalyzeDataSourceRiskDetails) Reset() { *m = AnalyzeDataSourceRiskDetails{} } +func (m *AnalyzeDataSourceRiskDetails) String() string { return proto.CompactTextString(m) } +func (*AnalyzeDataSourceRiskDetails) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +type isAnalyzeDataSourceRiskDetails_Result interface { + isAnalyzeDataSourceRiskDetails_Result() +} + +type AnalyzeDataSourceRiskDetails_NumericalStatsResult_ struct { + NumericalStatsResult *AnalyzeDataSourceRiskDetails_NumericalStatsResult `protobuf:"bytes,3,opt,name=numerical_stats_result,json=numericalStatsResult,oneof"` +} +type AnalyzeDataSourceRiskDetails_CategoricalStatsResult_ struct { + CategoricalStatsResult *AnalyzeDataSourceRiskDetails_CategoricalStatsResult `protobuf:"bytes,4,opt,name=categorical_stats_result,json=categoricalStatsResult,oneof"` +} +type AnalyzeDataSourceRiskDetails_KAnonymityResult_ struct { + KAnonymityResult *AnalyzeDataSourceRiskDetails_KAnonymityResult `protobuf:"bytes,5,opt,name=k_anonymity_result,json=kAnonymityResult,oneof"` +} +type AnalyzeDataSourceRiskDetails_LDiversityResult_ struct { + LDiversityResult *AnalyzeDataSourceRiskDetails_LDiversityResult `protobuf:"bytes,6,opt,name=l_diversity_result,json=lDiversityResult,oneof"` +} +type AnalyzeDataSourceRiskDetails_KMapEstimationResult_ struct { + KMapEstimationResult *AnalyzeDataSourceRiskDetails_KMapEstimationResult `protobuf:"bytes,7,opt,name=k_map_estimation_result,json=kMapEstimationResult,oneof"` +} + +func 
(*AnalyzeDataSourceRiskDetails_NumericalStatsResult_) isAnalyzeDataSourceRiskDetails_Result() {} +func (*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_) isAnalyzeDataSourceRiskDetails_Result() {} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult_) isAnalyzeDataSourceRiskDetails_Result() {} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult_) isAnalyzeDataSourceRiskDetails_Result() {} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult_) isAnalyzeDataSourceRiskDetails_Result() {} + +func (m *AnalyzeDataSourceRiskDetails) GetResult() isAnalyzeDataSourceRiskDetails_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails) GetRequestedPrivacyMetric() *PrivacyMetric { + if m != nil { + return m.RequestedPrivacyMetric + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails) GetRequestedSourceTable() *BigQueryTable { + if m != nil { + return m.RequestedSourceTable + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails) GetNumericalStatsResult() *AnalyzeDataSourceRiskDetails_NumericalStatsResult { + if x, ok := m.GetResult().(*AnalyzeDataSourceRiskDetails_NumericalStatsResult_); ok { + return x.NumericalStatsResult + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails) GetCategoricalStatsResult() *AnalyzeDataSourceRiskDetails_CategoricalStatsResult { + if x, ok := m.GetResult().(*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_); ok { + return x.CategoricalStatsResult + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails) GetKAnonymityResult() *AnalyzeDataSourceRiskDetails_KAnonymityResult { + if x, ok := m.GetResult().(*AnalyzeDataSourceRiskDetails_KAnonymityResult_); ok { + return x.KAnonymityResult + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails) GetLDiversityResult() *AnalyzeDataSourceRiskDetails_LDiversityResult { + if x, ok := m.GetResult().(*AnalyzeDataSourceRiskDetails_LDiversityResult_); ok { + return x.LDiversityResult + } + return nil +} + +func (m 
*AnalyzeDataSourceRiskDetails) GetKMapEstimationResult() *AnalyzeDataSourceRiskDetails_KMapEstimationResult { + if x, ok := m.GetResult().(*AnalyzeDataSourceRiskDetails_KMapEstimationResult_); ok { + return x.KMapEstimationResult + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AnalyzeDataSourceRiskDetails) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AnalyzeDataSourceRiskDetails_OneofMarshaler, _AnalyzeDataSourceRiskDetails_OneofUnmarshaler, _AnalyzeDataSourceRiskDetails_OneofSizer, []interface{}{ + (*AnalyzeDataSourceRiskDetails_NumericalStatsResult_)(nil), + (*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_)(nil), + (*AnalyzeDataSourceRiskDetails_KAnonymityResult_)(nil), + (*AnalyzeDataSourceRiskDetails_LDiversityResult_)(nil), + (*AnalyzeDataSourceRiskDetails_KMapEstimationResult_)(nil), + } +} + +func _AnalyzeDataSourceRiskDetails_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AnalyzeDataSourceRiskDetails) + // result + switch x := m.Result.(type) { + case *AnalyzeDataSourceRiskDetails_NumericalStatsResult_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NumericalStatsResult); err != nil { + return err + } + case *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CategoricalStatsResult); err != nil { + return err + } + case *AnalyzeDataSourceRiskDetails_KAnonymityResult_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KAnonymityResult); err != nil { + return err + } + case *AnalyzeDataSourceRiskDetails_LDiversityResult_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.LDiversityResult); err != nil { + return err + } + case *AnalyzeDataSourceRiskDetails_KMapEstimationResult_: + 
b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KMapEstimationResult); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("AnalyzeDataSourceRiskDetails.Result has unexpected type %T", x) + } + return nil +} + +func _AnalyzeDataSourceRiskDetails_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AnalyzeDataSourceRiskDetails) + switch tag { + case 3: // result.numerical_stats_result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AnalyzeDataSourceRiskDetails_NumericalStatsResult) + err := b.DecodeMessage(msg) + m.Result = &AnalyzeDataSourceRiskDetails_NumericalStatsResult_{msg} + return true, err + case 4: // result.categorical_stats_result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AnalyzeDataSourceRiskDetails_CategoricalStatsResult) + err := b.DecodeMessage(msg) + m.Result = &AnalyzeDataSourceRiskDetails_CategoricalStatsResult_{msg} + return true, err + case 5: // result.k_anonymity_result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AnalyzeDataSourceRiskDetails_KAnonymityResult) + err := b.DecodeMessage(msg) + m.Result = &AnalyzeDataSourceRiskDetails_KAnonymityResult_{msg} + return true, err + case 6: // result.l_diversity_result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AnalyzeDataSourceRiskDetails_LDiversityResult) + err := b.DecodeMessage(msg) + m.Result = &AnalyzeDataSourceRiskDetails_LDiversityResult_{msg} + return true, err + case 7: // result.k_map_estimation_result + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AnalyzeDataSourceRiskDetails_KMapEstimationResult) + err := b.DecodeMessage(msg) + m.Result = &AnalyzeDataSourceRiskDetails_KMapEstimationResult_{msg} + return true, err + default: + return false, nil + } +} + +func 
_AnalyzeDataSourceRiskDetails_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AnalyzeDataSourceRiskDetails) + // result + switch x := m.Result.(type) { + case *AnalyzeDataSourceRiskDetails_NumericalStatsResult_: + s := proto.Size(x.NumericalStatsResult) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_: + s := proto.Size(x.CategoricalStatsResult) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AnalyzeDataSourceRiskDetails_KAnonymityResult_: + s := proto.Size(x.KAnonymityResult) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AnalyzeDataSourceRiskDetails_LDiversityResult_: + s := proto.Size(x.LDiversityResult) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AnalyzeDataSourceRiskDetails_KMapEstimationResult_: + s := proto.Size(x.KMapEstimationResult) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Result of the numerical stats computation. +type AnalyzeDataSourceRiskDetails_NumericalStatsResult struct { + // Minimum value appearing in the column. + MinValue *Value `protobuf:"bytes,1,opt,name=min_value,json=minValue" json:"min_value,omitempty"` + // Maximum value appearing in the column. + MaxValue *Value `protobuf:"bytes,2,opt,name=max_value,json=maxValue" json:"max_value,omitempty"` + // List of 99 values that partition the set of field values into 100 equal + // sized buckets. 
+ QuantileValues []*Value `protobuf:"bytes,4,rep,name=quantile_values,json=quantileValues" json:"quantile_values,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_NumericalStatsResult) Reset() { + *m = AnalyzeDataSourceRiskDetails_NumericalStatsResult{} +} +func (m *AnalyzeDataSourceRiskDetails_NumericalStatsResult) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_NumericalStatsResult) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_NumericalStatsResult) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 0} +} + +func (m *AnalyzeDataSourceRiskDetails_NumericalStatsResult) GetMinValue() *Value { + if m != nil { + return m.MinValue + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_NumericalStatsResult) GetMaxValue() *Value { + if m != nil { + return m.MaxValue + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_NumericalStatsResult) GetQuantileValues() []*Value { + if m != nil { + return m.QuantileValues + } + return nil +} + +// Result of the categorical stats computation. +type AnalyzeDataSourceRiskDetails_CategoricalStatsResult struct { + // Histogram of value frequencies in the column. 
+ ValueFrequencyHistogramBuckets []*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket `protobuf:"bytes,5,rep,name=value_frequency_histogram_buckets,json=valueFrequencyHistogramBuckets" json:"value_frequency_histogram_buckets,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult) Reset() { + *m = AnalyzeDataSourceRiskDetails_CategoricalStatsResult{} +} +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_CategoricalStatsResult) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_CategoricalStatsResult) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 1} +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult) GetValueFrequencyHistogramBuckets() []*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket { + if m != nil { + return m.ValueFrequencyHistogramBuckets + } + return nil +} + +type AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket struct { + // Lower bound on the value frequency of the values in this bucket. + ValueFrequencyLowerBound int64 `protobuf:"varint,1,opt,name=value_frequency_lower_bound,json=valueFrequencyLowerBound" json:"value_frequency_lower_bound,omitempty"` + // Upper bound on the value frequency of the values in this bucket. + ValueFrequencyUpperBound int64 `protobuf:"varint,2,opt,name=value_frequency_upper_bound,json=valueFrequencyUpperBound" json:"value_frequency_upper_bound,omitempty"` + // Total number of values in this bucket. + BucketSize int64 `protobuf:"varint,3,opt,name=bucket_size,json=bucketSize" json:"bucket_size,omitempty"` + // Sample of value frequencies in this bucket. The total number of + // values returned per bucket is capped at 20. 
+ BucketValues []*ValueFrequency `protobuf:"bytes,4,rep,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` + // Total number of distinct values in this bucket. + BucketValueCount int64 `protobuf:"varint,5,opt,name=bucket_value_count,json=bucketValueCount" json:"bucket_value_count,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) Reset() { + *m = AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket{} +} +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) ProtoMessage() { +} +func (*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 1, 0} +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) GetValueFrequencyLowerBound() int64 { + if m != nil { + return m.ValueFrequencyLowerBound + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) GetValueFrequencyUpperBound() int64 { + if m != nil { + return m.ValueFrequencyUpperBound + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) GetBucketSize() int64 { + if m != nil { + return m.BucketSize + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) GetBucketValues() []*ValueFrequency { + if m != nil { + return m.BucketValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket) GetBucketValueCount() int64 { + if m != nil { + return m.BucketValueCount + } + return 0 +} + +// Result of the k-anonymity computation. 
+type AnalyzeDataSourceRiskDetails_KAnonymityResult struct { + // Histogram of k-anonymity equivalence classes. + EquivalenceClassHistogramBuckets []*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket `protobuf:"bytes,5,rep,name=equivalence_class_histogram_buckets,json=equivalenceClassHistogramBuckets" json:"equivalence_class_histogram_buckets,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult) Reset() { + *m = AnalyzeDataSourceRiskDetails_KAnonymityResult{} +} +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 2} +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult) GetEquivalenceClassHistogramBuckets() []*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket { + if m != nil { + return m.EquivalenceClassHistogramBuckets + } + return nil +} + +// The set of columns' values that share the same ldiversity value +type AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass struct { + // Set of values defining the equivalence class. One value per + // quasi-identifier column in the original KAnonymity metric message. + // The order is always the same as the original request. + QuasiIdsValues []*Value `protobuf:"bytes,1,rep,name=quasi_ids_values,json=quasiIdsValues" json:"quasi_ids_values,omitempty"` + // Size of the equivalence class, for example number of rows with the + // above set of values. 
+ EquivalenceClassSize int64 `protobuf:"varint,2,opt,name=equivalence_class_size,json=equivalenceClassSize" json:"equivalence_class_size,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass) Reset() { + *m = AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass{} +} +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 2, 0} +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass) GetQuasiIdsValues() []*Value { + if m != nil { + return m.QuasiIdsValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass) GetEquivalenceClassSize() int64 { + if m != nil { + return m.EquivalenceClassSize + } + return 0 +} + +type AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket struct { + // Lower bound on the size of the equivalence classes in this bucket. + EquivalenceClassSizeLowerBound int64 `protobuf:"varint,1,opt,name=equivalence_class_size_lower_bound,json=equivalenceClassSizeLowerBound" json:"equivalence_class_size_lower_bound,omitempty"` + // Upper bound on the size of the equivalence classes in this bucket. + EquivalenceClassSizeUpperBound int64 `protobuf:"varint,2,opt,name=equivalence_class_size_upper_bound,json=equivalenceClassSizeUpperBound" json:"equivalence_class_size_upper_bound,omitempty"` + // Total number of equivalence classes in this bucket. + BucketSize int64 `protobuf:"varint,3,opt,name=bucket_size,json=bucketSize" json:"bucket_size,omitempty"` + // Sample of equivalence classes in this bucket. 
The total number of + // classes returned per bucket is capped at 20. + BucketValues []*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass `protobuf:"bytes,4,rep,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` + // Total number of distinct equivalence classes in this bucket. + BucketValueCount int64 `protobuf:"varint,5,opt,name=bucket_value_count,json=bucketValueCount" json:"bucket_value_count,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) Reset() { + *m = AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket{} +} +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 2, 1} +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) GetEquivalenceClassSizeLowerBound() int64 { + if m != nil { + return m.EquivalenceClassSizeLowerBound + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) GetEquivalenceClassSizeUpperBound() int64 { + if m != nil { + return m.EquivalenceClassSizeUpperBound + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) GetBucketSize() int64 { + if m != nil { + return m.BucketSize + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) GetBucketValues() []*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass { + if m != nil { + return m.BucketValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket) GetBucketValueCount() int64 { + if m != nil { + return 
m.BucketValueCount + } + return 0 +} + +// Result of the l-diversity computation. +type AnalyzeDataSourceRiskDetails_LDiversityResult struct { + // Histogram of l-diversity equivalence class sensitive value frequencies. + SensitiveValueFrequencyHistogramBuckets []*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket `protobuf:"bytes,5,rep,name=sensitive_value_frequency_histogram_buckets,json=sensitiveValueFrequencyHistogramBuckets" json:"sensitive_value_frequency_histogram_buckets,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult) Reset() { + *m = AnalyzeDataSourceRiskDetails_LDiversityResult{} +} +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 3} +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult) GetSensitiveValueFrequencyHistogramBuckets() []*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket { + if m != nil { + return m.SensitiveValueFrequencyHistogramBuckets + } + return nil +} + +// The set of columns' values that share the same ldiversity value. +type AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass struct { + // Quasi-identifier values defining the k-anonymity equivalence + // class. The order is always the same as the original request. + QuasiIdsValues []*Value `protobuf:"bytes,1,rep,name=quasi_ids_values,json=quasiIdsValues" json:"quasi_ids_values,omitempty"` + // Size of the k-anonymity equivalence class. + EquivalenceClassSize int64 `protobuf:"varint,2,opt,name=equivalence_class_size,json=equivalenceClassSize" json:"equivalence_class_size,omitempty"` + // Number of distinct sensitive values in this equivalence class. 
+ NumDistinctSensitiveValues int64 `protobuf:"varint,3,opt,name=num_distinct_sensitive_values,json=numDistinctSensitiveValues" json:"num_distinct_sensitive_values,omitempty"` + // Estimated frequencies of top sensitive values. + TopSensitiveValues []*ValueFrequency `protobuf:"bytes,4,rep,name=top_sensitive_values,json=topSensitiveValues" json:"top_sensitive_values,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) Reset() { + *m = AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass{} +} +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 3, 0} +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) GetQuasiIdsValues() []*Value { + if m != nil { + return m.QuasiIdsValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) GetEquivalenceClassSize() int64 { + if m != nil { + return m.EquivalenceClassSize + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) GetNumDistinctSensitiveValues() int64 { + if m != nil { + return m.NumDistinctSensitiveValues + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass) GetTopSensitiveValues() []*ValueFrequency { + if m != nil { + return m.TopSensitiveValues + } + return nil +} + +type AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket struct { + // Lower bound on the sensitive value frequencies of the equivalence + // classes in this bucket. 
+ SensitiveValueFrequencyLowerBound int64 `protobuf:"varint,1,opt,name=sensitive_value_frequency_lower_bound,json=sensitiveValueFrequencyLowerBound" json:"sensitive_value_frequency_lower_bound,omitempty"` + // Upper bound on the sensitive value frequencies of the equivalence + // classes in this bucket. + SensitiveValueFrequencyUpperBound int64 `protobuf:"varint,2,opt,name=sensitive_value_frequency_upper_bound,json=sensitiveValueFrequencyUpperBound" json:"sensitive_value_frequency_upper_bound,omitempty"` + // Total number of equivalence classes in this bucket. + BucketSize int64 `protobuf:"varint,3,opt,name=bucket_size,json=bucketSize" json:"bucket_size,omitempty"` + // Sample of equivalence classes in this bucket. The total number of + // classes returned per bucket is capped at 20. + BucketValues []*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass `protobuf:"bytes,4,rep,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` + // Total number of distinct equivalence classes in this bucket. 
+ BucketValueCount int64 `protobuf:"varint,5,opt,name=bucket_value_count,json=bucketValueCount" json:"bucket_value_count,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) Reset() { + *m = AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket{} +} +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 3, 1} +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) GetSensitiveValueFrequencyLowerBound() int64 { + if m != nil { + return m.SensitiveValueFrequencyLowerBound + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) GetSensitiveValueFrequencyUpperBound() int64 { + if m != nil { + return m.SensitiveValueFrequencyUpperBound + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) GetBucketSize() int64 { + if m != nil { + return m.BucketSize + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) GetBucketValues() []*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass { + if m != nil { + return m.BucketValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket) GetBucketValueCount() int64 { + if m != nil { + return m.BucketValueCount + } + return 0 +} + +// Result of the reidentifiability analysis. Note that these results are an +// estimation, not exact values. +type AnalyzeDataSourceRiskDetails_KMapEstimationResult struct { + // The intervals [min_anonymity, max_anonymity] do not overlap. 
If a value + // doesn't correspond to any such interval, the associated frequency is + // zero. For example, the following records: + // {min_anonymity: 1, max_anonymity: 1, frequency: 17} + // {min_anonymity: 2, max_anonymity: 3, frequency: 42} + // {min_anonymity: 5, max_anonymity: 10, frequency: 99} + // mean that there are no record with an estimated anonymity of 4, 5, or + // larger than 10. + KMapEstimationHistogram []*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket `protobuf:"bytes,1,rep,name=k_map_estimation_histogram,json=kMapEstimationHistogram" json:"k_map_estimation_histogram,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult) Reset() { + *m = AnalyzeDataSourceRiskDetails_KMapEstimationResult{} +} +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 4} +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult) GetKMapEstimationHistogram() []*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket { + if m != nil { + return m.KMapEstimationHistogram + } + return nil +} + +// A tuple of values for the quasi-identifier columns. +type AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues struct { + // The quasi-identifier values. + QuasiIdsValues []*Value `protobuf:"bytes,1,rep,name=quasi_ids_values,json=quasiIdsValues" json:"quasi_ids_values,omitempty"` + // The estimated anonymity for these quasi-identifier values. 
+ EstimatedAnonymity int64 `protobuf:"varint,2,opt,name=estimated_anonymity,json=estimatedAnonymity" json:"estimated_anonymity,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues) Reset() { + *m = AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues{} +} +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues) ProtoMessage() {} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 4, 0} +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues) GetQuasiIdsValues() []*Value { + if m != nil { + return m.QuasiIdsValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues) GetEstimatedAnonymity() int64 { + if m != nil { + return m.EstimatedAnonymity + } + return 0 +} + +// A KMapEstimationHistogramBucket message with the following values: +// min_anonymity: 3 +// max_anonymity: 5 +// frequency: 42 +// means that there are 42 records whose quasi-identifier values correspond +// to 3, 4 or 5 people in the overlying population. An important particular +// case is when min_anonymity = max_anonymity = 1: the frequency field then +// corresponds to the number of uniquely identifiable records. +type AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket struct { + // Always positive. + MinAnonymity int64 `protobuf:"varint,1,opt,name=min_anonymity,json=minAnonymity" json:"min_anonymity,omitempty"` + // Always greater than or equal to min_anonymity. + MaxAnonymity int64 `protobuf:"varint,2,opt,name=max_anonymity,json=maxAnonymity" json:"max_anonymity,omitempty"` + // Number of records within these anonymity bounds. 
+ BucketSize int64 `protobuf:"varint,5,opt,name=bucket_size,json=bucketSize" json:"bucket_size,omitempty"` + // Sample of quasi-identifier tuple values in this bucket. The total + // number of classes returned per bucket is capped at 20. + BucketValues []*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues `protobuf:"bytes,6,rep,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` + // Total number of distinct quasi-identifier tuple values in this bucket. + BucketValueCount int64 `protobuf:"varint,7,opt,name=bucket_value_count,json=bucketValueCount" json:"bucket_value_count,omitempty"` +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) Reset() { + *m = AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket{} +} +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) String() string { + return proto.CompactTextString(m) +} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) ProtoMessage() { +} +func (*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{31, 4, 1} +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) GetMinAnonymity() int64 { + if m != nil { + return m.MinAnonymity + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) GetMaxAnonymity() int64 { + if m != nil { + return m.MaxAnonymity + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) GetBucketSize() int64 { + if m != nil { + return m.BucketSize + } + return 0 +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) GetBucketValues() []*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues { + if m != nil { + 
return m.BucketValues + } + return nil +} + +func (m *AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket) GetBucketValueCount() int64 { + if m != nil { + return m.BucketValueCount + } + return 0 +} + +// A value of a field, including its frequency. +type ValueFrequency struct { + // A value contained in the field in question. + Value *Value `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + // How many times the value is contained in the field. + Count int64 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` +} + +func (m *ValueFrequency) Reset() { *m = ValueFrequency{} } +func (m *ValueFrequency) String() string { return proto.CompactTextString(m) } +func (*ValueFrequency) ProtoMessage() {} +func (*ValueFrequency) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *ValueFrequency) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *ValueFrequency) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +// Set of primitive values supported by the system. +// Note that for the purposes of inspection or transformation, the number +// of bytes considered to comprise a 'Value' is based on its representation +// as a UTF-8 encoded string. For example, if 'integer_value' is set to +// 123456789, the number of bytes would be counted as 9, even though an +// int64 only holds up to 8 bytes of data. 
+type Value struct { + // Types that are valid to be assigned to Type: + // *Value_IntegerValue + // *Value_FloatValue + // *Value_StringValue + // *Value_BooleanValue + // *Value_TimestampValue + // *Value_TimeValue + // *Value_DateValue + // *Value_DayOfWeekValue + Type isValue_Type `protobuf_oneof:"type"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +type isValue_Type interface { + isValue_Type() +} + +type Value_IntegerValue struct { + IntegerValue int64 `protobuf:"varint,1,opt,name=integer_value,json=integerValue,oneof"` +} +type Value_FloatValue struct { + FloatValue float64 `protobuf:"fixed64,2,opt,name=float_value,json=floatValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BooleanValue struct { + BooleanValue bool `protobuf:"varint,4,opt,name=boolean_value,json=booleanValue,oneof"` +} +type Value_TimestampValue struct { + TimestampValue *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=timestamp_value,json=timestampValue,oneof"` +} +type Value_TimeValue struct { + TimeValue *google_type2.TimeOfDay `protobuf:"bytes,6,opt,name=time_value,json=timeValue,oneof"` +} +type Value_DateValue struct { + DateValue *google_type.Date `protobuf:"bytes,7,opt,name=date_value,json=dateValue,oneof"` +} +type Value_DayOfWeekValue struct { + DayOfWeekValue google_type1.DayOfWeek `protobuf:"varint,8,opt,name=day_of_week_value,json=dayOfWeekValue,enum=google.type.DayOfWeek,oneof"` +} + +func (*Value_IntegerValue) isValue_Type() {} +func (*Value_FloatValue) isValue_Type() {} +func (*Value_StringValue) isValue_Type() {} +func (*Value_BooleanValue) isValue_Type() {} +func (*Value_TimestampValue) isValue_Type() {} +func (*Value_TimeValue) isValue_Type() {} +func (*Value_DateValue) 
isValue_Type() {} +func (*Value_DayOfWeekValue) isValue_Type() {} + +func (m *Value) GetType() isValue_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *Value) GetIntegerValue() int64 { + if x, ok := m.GetType().(*Value_IntegerValue); ok { + return x.IntegerValue + } + return 0 +} + +func (m *Value) GetFloatValue() float64 { + if x, ok := m.GetType().(*Value_FloatValue); ok { + return x.FloatValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetType().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBooleanValue() bool { + if x, ok := m.GetType().(*Value_BooleanValue); ok { + return x.BooleanValue + } + return false +} + +func (m *Value) GetTimestampValue() *google_protobuf1.Timestamp { + if x, ok := m.GetType().(*Value_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +func (m *Value) GetTimeValue() *google_type2.TimeOfDay { + if x, ok := m.GetType().(*Value_TimeValue); ok { + return x.TimeValue + } + return nil +} + +func (m *Value) GetDateValue() *google_type.Date { + if x, ok := m.GetType().(*Value_DateValue); ok { + return x.DateValue + } + return nil +} + +func (m *Value) GetDayOfWeekValue() google_type1.DayOfWeek { + if x, ok := m.GetType().(*Value_DayOfWeekValue); ok { + return x.DayOfWeekValue + } + return google_type1.DayOfWeek_DAY_OF_WEEK_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_IntegerValue)(nil), + (*Value_FloatValue)(nil), + (*Value_StringValue)(nil), + (*Value_BooleanValue)(nil), + (*Value_TimestampValue)(nil), + (*Value_TimeValue)(nil), + (*Value_DateValue)(nil), + (*Value_DayOfWeekValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // type + switch x := m.Type.(type) { + case *Value_IntegerValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.IntegerValue)) + case *Value_FloatValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.FloatValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BooleanValue: + t := uint64(0) + if x.BooleanValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_TimestampValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimestampValue); err != nil { + return err + } + case *Value_TimeValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimeValue); err != nil { + return err + } + case *Value_DateValue: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DateValue); err != nil { + return err + } + case *Value_DayOfWeekValue: + b.EncodeVarint(8<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.DayOfWeekValue)) + case nil: + default: + return fmt.Errorf("Value.Type has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // type.integer_value + if wire != proto.WireVarint { + return true, 
proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &Value_IntegerValue{int64(x)} + return true, err + case 2: // type.float_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Type = &Value_FloatValue{math.Float64frombits(x)} + return true, err + case 3: // type.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Type = &Value_StringValue{x} + return true, err + case 4: // type.boolean_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &Value_BooleanValue{x != 0} + return true, err + case 5: // type.timestamp_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf1.Timestamp) + err := b.DecodeMessage(msg) + m.Type = &Value_TimestampValue{msg} + return true, err + case 6: // type.time_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type2.TimeOfDay) + err := b.DecodeMessage(msg) + m.Type = &Value_TimeValue{msg} + return true, err + case 7: // type.date_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type.Date) + err := b.DecodeMessage(msg) + m.Type = &Value_DateValue{msg} + return true, err + case 8: // type.day_of_week_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Type = &Value_DayOfWeekValue{google_type1.DayOfWeek(x)} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // type + switch x := m.Type.(type) { + case *Value_IntegerValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntegerValue)) + case *Value_FloatValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + 
n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BooleanValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_TimestampValue: + s := proto.Size(x.TimestampValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_TimeValue: + s := proto.Size(x.TimeValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_DateValue: + s := proto.Size(x.DateValue) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_DayOfWeekValue: + n += proto.SizeVarint(8<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.DayOfWeekValue)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for infoType-dependent details parsed from quote. +type QuoteInfo struct { + // Object representation of the quote. 
+ // + // Types that are valid to be assigned to ParsedQuote: + // *QuoteInfo_DateTime + ParsedQuote isQuoteInfo_ParsedQuote `protobuf_oneof:"parsed_quote"` +} + +func (m *QuoteInfo) Reset() { *m = QuoteInfo{} } +func (m *QuoteInfo) String() string { return proto.CompactTextString(m) } +func (*QuoteInfo) ProtoMessage() {} +func (*QuoteInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +type isQuoteInfo_ParsedQuote interface { + isQuoteInfo_ParsedQuote() +} + +type QuoteInfo_DateTime struct { + DateTime *DateTime `protobuf:"bytes,2,opt,name=date_time,json=dateTime,oneof"` +} + +func (*QuoteInfo_DateTime) isQuoteInfo_ParsedQuote() {} + +func (m *QuoteInfo) GetParsedQuote() isQuoteInfo_ParsedQuote { + if m != nil { + return m.ParsedQuote + } + return nil +} + +func (m *QuoteInfo) GetDateTime() *DateTime { + if x, ok := m.GetParsedQuote().(*QuoteInfo_DateTime); ok { + return x.DateTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*QuoteInfo) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _QuoteInfo_OneofMarshaler, _QuoteInfo_OneofUnmarshaler, _QuoteInfo_OneofSizer, []interface{}{ + (*QuoteInfo_DateTime)(nil), + } +} + +func _QuoteInfo_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*QuoteInfo) + // parsed_quote + switch x := m.ParsedQuote.(type) { + case *QuoteInfo_DateTime: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DateTime); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("QuoteInfo.ParsedQuote has unexpected type %T", x) + } + return nil +} + +func _QuoteInfo_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*QuoteInfo) + switch tag { + case 2: // parsed_quote.date_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DateTime) + err := b.DecodeMessage(msg) + m.ParsedQuote = &QuoteInfo_DateTime{msg} + return true, err + default: + return false, nil + } +} + +func _QuoteInfo_OneofSizer(msg proto.Message) (n int) { + m := msg.(*QuoteInfo) + // parsed_quote + switch x := m.ParsedQuote.(type) { + case *QuoteInfo_DateTime: + s := proto.Size(x.DateTime) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for a date time object. +type DateTime struct { + // One or more of the following must be set. All fields are optional, but + // when set must be valid date or time values. 
+ Date *google_type.Date `protobuf:"bytes,1,opt,name=date" json:"date,omitempty"` + DayOfWeek google_type1.DayOfWeek `protobuf:"varint,2,opt,name=day_of_week,json=dayOfWeek,enum=google.type.DayOfWeek" json:"day_of_week,omitempty"` + Time *google_type2.TimeOfDay `protobuf:"bytes,3,opt,name=time" json:"time,omitempty"` + TimeZone *DateTime_TimeZone `protobuf:"bytes,4,opt,name=time_zone,json=timeZone" json:"time_zone,omitempty"` +} + +func (m *DateTime) Reset() { *m = DateTime{} } +func (m *DateTime) String() string { return proto.CompactTextString(m) } +func (*DateTime) ProtoMessage() {} +func (*DateTime) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *DateTime) GetDate() *google_type.Date { + if m != nil { + return m.Date + } + return nil +} + +func (m *DateTime) GetDayOfWeek() google_type1.DayOfWeek { + if m != nil { + return m.DayOfWeek + } + return google_type1.DayOfWeek_DAY_OF_WEEK_UNSPECIFIED +} + +func (m *DateTime) GetTime() *google_type2.TimeOfDay { + if m != nil { + return m.Time + } + return nil +} + +func (m *DateTime) GetTimeZone() *DateTime_TimeZone { + if m != nil { + return m.TimeZone + } + return nil +} + +type DateTime_TimeZone struct { + // Set only if the offset can be determined. Positive for time ahead of UTC. + // E.g. For "UTC-9", this value is -540. + OffsetMinutes int32 `protobuf:"varint,1,opt,name=offset_minutes,json=offsetMinutes" json:"offset_minutes,omitempty"` +} + +func (m *DateTime_TimeZone) Reset() { *m = DateTime_TimeZone{} } +func (m *DateTime_TimeZone) String() string { return proto.CompactTextString(m) } +func (*DateTime_TimeZone) ProtoMessage() {} +func (*DateTime_TimeZone) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35, 0} } + +func (m *DateTime_TimeZone) GetOffsetMinutes() int32 { + if m != nil { + return m.OffsetMinutes + } + return 0 +} + +// The configuration that controls how the data will change. 
+type DeidentifyConfig struct { + // Types that are valid to be assigned to Transformation: + // *DeidentifyConfig_InfoTypeTransformations + // *DeidentifyConfig_RecordTransformations + Transformation isDeidentifyConfig_Transformation `protobuf_oneof:"transformation"` +} + +func (m *DeidentifyConfig) Reset() { *m = DeidentifyConfig{} } +func (m *DeidentifyConfig) String() string { return proto.CompactTextString(m) } +func (*DeidentifyConfig) ProtoMessage() {} +func (*DeidentifyConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +type isDeidentifyConfig_Transformation interface { + isDeidentifyConfig_Transformation() +} + +type DeidentifyConfig_InfoTypeTransformations struct { + InfoTypeTransformations *InfoTypeTransformations `protobuf:"bytes,1,opt,name=info_type_transformations,json=infoTypeTransformations,oneof"` +} +type DeidentifyConfig_RecordTransformations struct { + RecordTransformations *RecordTransformations `protobuf:"bytes,2,opt,name=record_transformations,json=recordTransformations,oneof"` +} + +func (*DeidentifyConfig_InfoTypeTransformations) isDeidentifyConfig_Transformation() {} +func (*DeidentifyConfig_RecordTransformations) isDeidentifyConfig_Transformation() {} + +func (m *DeidentifyConfig) GetTransformation() isDeidentifyConfig_Transformation { + if m != nil { + return m.Transformation + } + return nil +} + +func (m *DeidentifyConfig) GetInfoTypeTransformations() *InfoTypeTransformations { + if x, ok := m.GetTransformation().(*DeidentifyConfig_InfoTypeTransformations); ok { + return x.InfoTypeTransformations + } + return nil +} + +func (m *DeidentifyConfig) GetRecordTransformations() *RecordTransformations { + if x, ok := m.GetTransformation().(*DeidentifyConfig_RecordTransformations); ok { + return x.RecordTransformations + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*DeidentifyConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DeidentifyConfig_OneofMarshaler, _DeidentifyConfig_OneofUnmarshaler, _DeidentifyConfig_OneofSizer, []interface{}{ + (*DeidentifyConfig_InfoTypeTransformations)(nil), + (*DeidentifyConfig_RecordTransformations)(nil), + } +} + +func _DeidentifyConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DeidentifyConfig) + // transformation + switch x := m.Transformation.(type) { + case *DeidentifyConfig_InfoTypeTransformations: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InfoTypeTransformations); err != nil { + return err + } + case *DeidentifyConfig_RecordTransformations: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RecordTransformations); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DeidentifyConfig.Transformation has unexpected type %T", x) + } + return nil +} + +func _DeidentifyConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DeidentifyConfig) + switch tag { + case 1: // transformation.info_type_transformations + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InfoTypeTransformations) + err := b.DecodeMessage(msg) + m.Transformation = &DeidentifyConfig_InfoTypeTransformations{msg} + return true, err + case 2: // transformation.record_transformations + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RecordTransformations) + err := b.DecodeMessage(msg) + m.Transformation = &DeidentifyConfig_RecordTransformations{msg} + return true, err + default: + return false, nil + } +} + +func _DeidentifyConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DeidentifyConfig) + // transformation + switch x := 
m.Transformation.(type) { + case *DeidentifyConfig_InfoTypeTransformations: + s := proto.Size(x.InfoTypeTransformations) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *DeidentifyConfig_RecordTransformations: + s := proto.Size(x.RecordTransformations) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A rule for transforming a value. +type PrimitiveTransformation struct { + // Types that are valid to be assigned to Transformation: + // *PrimitiveTransformation_ReplaceConfig + // *PrimitiveTransformation_RedactConfig + // *PrimitiveTransformation_CharacterMaskConfig + // *PrimitiveTransformation_CryptoReplaceFfxFpeConfig + // *PrimitiveTransformation_FixedSizeBucketingConfig + // *PrimitiveTransformation_BucketingConfig + // *PrimitiveTransformation_ReplaceWithInfoTypeConfig + // *PrimitiveTransformation_TimePartConfig + // *PrimitiveTransformation_CryptoHashConfig + // *PrimitiveTransformation_DateShiftConfig + Transformation isPrimitiveTransformation_Transformation `protobuf_oneof:"transformation"` +} + +func (m *PrimitiveTransformation) Reset() { *m = PrimitiveTransformation{} } +func (m *PrimitiveTransformation) String() string { return proto.CompactTextString(m) } +func (*PrimitiveTransformation) ProtoMessage() {} +func (*PrimitiveTransformation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isPrimitiveTransformation_Transformation interface { + isPrimitiveTransformation_Transformation() +} + +type PrimitiveTransformation_ReplaceConfig struct { + ReplaceConfig *ReplaceValueConfig `protobuf:"bytes,1,opt,name=replace_config,json=replaceConfig,oneof"` +} +type PrimitiveTransformation_RedactConfig struct { + RedactConfig *RedactConfig `protobuf:"bytes,2,opt,name=redact_config,json=redactConfig,oneof"` +} +type 
PrimitiveTransformation_CharacterMaskConfig struct { + CharacterMaskConfig *CharacterMaskConfig `protobuf:"bytes,3,opt,name=character_mask_config,json=characterMaskConfig,oneof"` +} +type PrimitiveTransformation_CryptoReplaceFfxFpeConfig struct { + CryptoReplaceFfxFpeConfig *CryptoReplaceFfxFpeConfig `protobuf:"bytes,4,opt,name=crypto_replace_ffx_fpe_config,json=cryptoReplaceFfxFpeConfig,oneof"` +} +type PrimitiveTransformation_FixedSizeBucketingConfig struct { + FixedSizeBucketingConfig *FixedSizeBucketingConfig `protobuf:"bytes,5,opt,name=fixed_size_bucketing_config,json=fixedSizeBucketingConfig,oneof"` +} +type PrimitiveTransformation_BucketingConfig struct { + BucketingConfig *BucketingConfig `protobuf:"bytes,6,opt,name=bucketing_config,json=bucketingConfig,oneof"` +} +type PrimitiveTransformation_ReplaceWithInfoTypeConfig struct { + ReplaceWithInfoTypeConfig *ReplaceWithInfoTypeConfig `protobuf:"bytes,7,opt,name=replace_with_info_type_config,json=replaceWithInfoTypeConfig,oneof"` +} +type PrimitiveTransformation_TimePartConfig struct { + TimePartConfig *TimePartConfig `protobuf:"bytes,8,opt,name=time_part_config,json=timePartConfig,oneof"` +} +type PrimitiveTransformation_CryptoHashConfig struct { + CryptoHashConfig *CryptoHashConfig `protobuf:"bytes,9,opt,name=crypto_hash_config,json=cryptoHashConfig,oneof"` +} +type PrimitiveTransformation_DateShiftConfig struct { + DateShiftConfig *DateShiftConfig `protobuf:"bytes,11,opt,name=date_shift_config,json=dateShiftConfig,oneof"` +} + +func (*PrimitiveTransformation_ReplaceConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_RedactConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_CharacterMaskConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_CryptoReplaceFfxFpeConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_FixedSizeBucketingConfig) isPrimitiveTransformation_Transformation() 
{} +func (*PrimitiveTransformation_BucketingConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_ReplaceWithInfoTypeConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_TimePartConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_CryptoHashConfig) isPrimitiveTransformation_Transformation() {} +func (*PrimitiveTransformation_DateShiftConfig) isPrimitiveTransformation_Transformation() {} + +func (m *PrimitiveTransformation) GetTransformation() isPrimitiveTransformation_Transformation { + if m != nil { + return m.Transformation + } + return nil +} + +func (m *PrimitiveTransformation) GetReplaceConfig() *ReplaceValueConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_ReplaceConfig); ok { + return x.ReplaceConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetRedactConfig() *RedactConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_RedactConfig); ok { + return x.RedactConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetCharacterMaskConfig() *CharacterMaskConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_CharacterMaskConfig); ok { + return x.CharacterMaskConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetCryptoReplaceFfxFpeConfig() *CryptoReplaceFfxFpeConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_CryptoReplaceFfxFpeConfig); ok { + return x.CryptoReplaceFfxFpeConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetFixedSizeBucketingConfig() *FixedSizeBucketingConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_FixedSizeBucketingConfig); ok { + return x.FixedSizeBucketingConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetBucketingConfig() *BucketingConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_BucketingConfig); ok { + return x.BucketingConfig + } + return nil +} + +func (m 
*PrimitiveTransformation) GetReplaceWithInfoTypeConfig() *ReplaceWithInfoTypeConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_ReplaceWithInfoTypeConfig); ok { + return x.ReplaceWithInfoTypeConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetTimePartConfig() *TimePartConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_TimePartConfig); ok { + return x.TimePartConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetCryptoHashConfig() *CryptoHashConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_CryptoHashConfig); ok { + return x.CryptoHashConfig + } + return nil +} + +func (m *PrimitiveTransformation) GetDateShiftConfig() *DateShiftConfig { + if x, ok := m.GetTransformation().(*PrimitiveTransformation_DateShiftConfig); ok { + return x.DateShiftConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*PrimitiveTransformation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PrimitiveTransformation_OneofMarshaler, _PrimitiveTransformation_OneofUnmarshaler, _PrimitiveTransformation_OneofSizer, []interface{}{ + (*PrimitiveTransformation_ReplaceConfig)(nil), + (*PrimitiveTransformation_RedactConfig)(nil), + (*PrimitiveTransformation_CharacterMaskConfig)(nil), + (*PrimitiveTransformation_CryptoReplaceFfxFpeConfig)(nil), + (*PrimitiveTransformation_FixedSizeBucketingConfig)(nil), + (*PrimitiveTransformation_BucketingConfig)(nil), + (*PrimitiveTransformation_ReplaceWithInfoTypeConfig)(nil), + (*PrimitiveTransformation_TimePartConfig)(nil), + (*PrimitiveTransformation_CryptoHashConfig)(nil), + (*PrimitiveTransformation_DateShiftConfig)(nil), + } +} + +func _PrimitiveTransformation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PrimitiveTransformation) + // transformation + 
switch x := m.Transformation.(type) { + case *PrimitiveTransformation_ReplaceConfig: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReplaceConfig); err != nil { + return err + } + case *PrimitiveTransformation_RedactConfig: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RedactConfig); err != nil { + return err + } + case *PrimitiveTransformation_CharacterMaskConfig: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CharacterMaskConfig); err != nil { + return err + } + case *PrimitiveTransformation_CryptoReplaceFfxFpeConfig: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CryptoReplaceFfxFpeConfig); err != nil { + return err + } + case *PrimitiveTransformation_FixedSizeBucketingConfig: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FixedSizeBucketingConfig); err != nil { + return err + } + case *PrimitiveTransformation_BucketingConfig: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketingConfig); err != nil { + return err + } + case *PrimitiveTransformation_ReplaceWithInfoTypeConfig: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ReplaceWithInfoTypeConfig); err != nil { + return err + } + case *PrimitiveTransformation_TimePartConfig: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TimePartConfig); err != nil { + return err + } + case *PrimitiveTransformation_CryptoHashConfig: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CryptoHashConfig); err != nil { + return err + } + case *PrimitiveTransformation_DateShiftConfig: + b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DateShiftConfig); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PrimitiveTransformation.Transformation has unexpected type %T", x) + } + return nil +} + +func _PrimitiveTransformation_OneofUnmarshaler(msg proto.Message, tag, wire int, b 
*proto.Buffer) (bool, error) { + m := msg.(*PrimitiveTransformation) + switch tag { + case 1: // transformation.replace_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReplaceValueConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_ReplaceConfig{msg} + return true, err + case 2: // transformation.redact_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RedactConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_RedactConfig{msg} + return true, err + case 3: // transformation.character_mask_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CharacterMaskConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_CharacterMaskConfig{msg} + return true, err + case 4: // transformation.crypto_replace_ffx_fpe_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CryptoReplaceFfxFpeConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_CryptoReplaceFfxFpeConfig{msg} + return true, err + case 5: // transformation.fixed_size_bucketing_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FixedSizeBucketingConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_FixedSizeBucketingConfig{msg} + return true, err + case 6: // transformation.bucketing_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketingConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_BucketingConfig{msg} + return true, err + case 7: // transformation.replace_with_info_type_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReplaceWithInfoTypeConfig) + err := b.DecodeMessage(msg) + m.Transformation = 
&PrimitiveTransformation_ReplaceWithInfoTypeConfig{msg} + return true, err + case 8: // transformation.time_part_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TimePartConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_TimePartConfig{msg} + return true, err + case 9: // transformation.crypto_hash_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CryptoHashConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_CryptoHashConfig{msg} + return true, err + case 11: // transformation.date_shift_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DateShiftConfig) + err := b.DecodeMessage(msg) + m.Transformation = &PrimitiveTransformation_DateShiftConfig{msg} + return true, err + default: + return false, nil + } +} + +func _PrimitiveTransformation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PrimitiveTransformation) + // transformation + switch x := m.Transformation.(type) { + case *PrimitiveTransformation_ReplaceConfig: + s := proto.Size(x.ReplaceConfig) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_RedactConfig: + s := proto.Size(x.RedactConfig) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_CharacterMaskConfig: + s := proto.Size(x.CharacterMaskConfig) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_CryptoReplaceFfxFpeConfig: + s := proto.Size(x.CryptoReplaceFfxFpeConfig) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_FixedSizeBucketingConfig: + s := proto.Size(x.FixedSizeBucketingConfig) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_BucketingConfig: + s := proto.Size(x.BucketingConfig) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_ReplaceWithInfoTypeConfig: + s := proto.Size(x.ReplaceWithInfoTypeConfig) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_TimePartConfig: + s := proto.Size(x.TimePartConfig) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_CryptoHashConfig: + s := proto.Size(x.CryptoHashConfig) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *PrimitiveTransformation_DateShiftConfig: + s := proto.Size(x.DateShiftConfig) + n += proto.SizeVarint(11<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// For use with `Date`, `Timestamp`, and `TimeOfDay`, extract or preserve a +// portion of the value. +type TimePartConfig struct { + PartToExtract TimePartConfig_TimePart `protobuf:"varint,1,opt,name=part_to_extract,json=partToExtract,enum=google.privacy.dlp.v2.TimePartConfig_TimePart" json:"part_to_extract,omitempty"` +} + +func (m *TimePartConfig) Reset() { *m = TimePartConfig{} } +func (m *TimePartConfig) String() string { return proto.CompactTextString(m) } +func (*TimePartConfig) ProtoMessage() {} +func (*TimePartConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *TimePartConfig) GetPartToExtract() TimePartConfig_TimePart { + if m != nil { + return m.PartToExtract + } + return TimePartConfig_TIME_PART_UNSPECIFIED +} + +// Pseudonymization method that generates surrogates via cryptographic hashing. +// Uses SHA-256. +// The key size must be either 32 or 64 bytes. 
+// Outputs a 32 byte digest as an uppercase hex string +// (for example, 41D1567F7F99F1DC2A5FAB886DEE5BEE). +// Currently, only string and integer values can be hashed. +type CryptoHashConfig struct { + // The key used by the hash function. + CryptoKey *CryptoKey `protobuf:"bytes,1,opt,name=crypto_key,json=cryptoKey" json:"crypto_key,omitempty"` +} + +func (m *CryptoHashConfig) Reset() { *m = CryptoHashConfig{} } +func (m *CryptoHashConfig) String() string { return proto.CompactTextString(m) } +func (*CryptoHashConfig) ProtoMessage() {} +func (*CryptoHashConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +func (m *CryptoHashConfig) GetCryptoKey() *CryptoKey { + if m != nil { + return m.CryptoKey + } + return nil +} + +// Replace each input value with a given `Value`. +type ReplaceValueConfig struct { + // Value to replace it with. + NewValue *Value `protobuf:"bytes,1,opt,name=new_value,json=newValue" json:"new_value,omitempty"` +} + +func (m *ReplaceValueConfig) Reset() { *m = ReplaceValueConfig{} } +func (m *ReplaceValueConfig) String() string { return proto.CompactTextString(m) } +func (*ReplaceValueConfig) ProtoMessage() {} +func (*ReplaceValueConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *ReplaceValueConfig) GetNewValue() *Value { + if m != nil { + return m.NewValue + } + return nil +} + +// Replace each matching finding with the name of the info_type. +type ReplaceWithInfoTypeConfig struct { +} + +func (m *ReplaceWithInfoTypeConfig) Reset() { *m = ReplaceWithInfoTypeConfig{} } +func (m *ReplaceWithInfoTypeConfig) String() string { return proto.CompactTextString(m) } +func (*ReplaceWithInfoTypeConfig) ProtoMessage() {} +func (*ReplaceWithInfoTypeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +// Redact a given value. 
For example, if used with an `InfoTypeTransformation` +// transforming PHONE_NUMBER, and input 'My phone number is 206-555-0123', the +// output would be 'My phone number is '. +type RedactConfig struct { +} + +func (m *RedactConfig) Reset() { *m = RedactConfig{} } +func (m *RedactConfig) String() string { return proto.CompactTextString(m) } +func (*RedactConfig) ProtoMessage() {} +func (*RedactConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +// Characters to skip when doing deidentification of a value. These will be left +// alone and skipped. +type CharsToIgnore struct { + // Types that are valid to be assigned to Characters: + // *CharsToIgnore_CharactersToSkip + // *CharsToIgnore_CommonCharactersToIgnore + Characters isCharsToIgnore_Characters `protobuf_oneof:"characters"` +} + +func (m *CharsToIgnore) Reset() { *m = CharsToIgnore{} } +func (m *CharsToIgnore) String() string { return proto.CompactTextString(m) } +func (*CharsToIgnore) ProtoMessage() {} +func (*CharsToIgnore) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +type isCharsToIgnore_Characters interface { + isCharsToIgnore_Characters() +} + +type CharsToIgnore_CharactersToSkip struct { + CharactersToSkip string `protobuf:"bytes,1,opt,name=characters_to_skip,json=charactersToSkip,oneof"` +} +type CharsToIgnore_CommonCharactersToIgnore struct { + CommonCharactersToIgnore CharsToIgnore_CommonCharsToIgnore `protobuf:"varint,2,opt,name=common_characters_to_ignore,json=commonCharactersToIgnore,enum=google.privacy.dlp.v2.CharsToIgnore_CommonCharsToIgnore,oneof"` +} + +func (*CharsToIgnore_CharactersToSkip) isCharsToIgnore_Characters() {} +func (*CharsToIgnore_CommonCharactersToIgnore) isCharsToIgnore_Characters() {} + +func (m *CharsToIgnore) GetCharacters() isCharsToIgnore_Characters { + if m != nil { + return m.Characters + } + return nil +} + +func (m *CharsToIgnore) GetCharactersToSkip() string { + if x, ok := 
m.GetCharacters().(*CharsToIgnore_CharactersToSkip); ok { + return x.CharactersToSkip + } + return "" +} + +func (m *CharsToIgnore) GetCommonCharactersToIgnore() CharsToIgnore_CommonCharsToIgnore { + if x, ok := m.GetCharacters().(*CharsToIgnore_CommonCharactersToIgnore); ok { + return x.CommonCharactersToIgnore + } + return CharsToIgnore_COMMON_CHARS_TO_IGNORE_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CharsToIgnore) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CharsToIgnore_OneofMarshaler, _CharsToIgnore_OneofUnmarshaler, _CharsToIgnore_OneofSizer, []interface{}{ + (*CharsToIgnore_CharactersToSkip)(nil), + (*CharsToIgnore_CommonCharactersToIgnore)(nil), + } +} + +func _CharsToIgnore_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CharsToIgnore) + // characters + switch x := m.Characters.(type) { + case *CharsToIgnore_CharactersToSkip: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.CharactersToSkip) + case *CharsToIgnore_CommonCharactersToIgnore: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CommonCharactersToIgnore)) + case nil: + default: + return fmt.Errorf("CharsToIgnore.Characters has unexpected type %T", x) + } + return nil +} + +func _CharsToIgnore_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CharsToIgnore) + switch tag { + case 1: // characters.characters_to_skip + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Characters = &CharsToIgnore_CharactersToSkip{x} + return true, err + case 2: // characters.common_characters_to_ignore + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Characters = 
&CharsToIgnore_CommonCharactersToIgnore{CharsToIgnore_CommonCharsToIgnore(x)} + return true, err + default: + return false, nil + } +} + +func _CharsToIgnore_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CharsToIgnore) + // characters + switch x := m.Characters.(type) { + case *CharsToIgnore_CharactersToSkip: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.CharactersToSkip))) + n += len(x.CharactersToSkip) + case *CharsToIgnore_CommonCharactersToIgnore: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CommonCharactersToIgnore)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Partially mask a string by replacing a given number of characters with a +// fixed character. Masking can start from the beginning or end of the string. +// This can be used on data of any type (numbers, longs, and so on) and when +// de-identifying structured data we'll attempt to preserve the original data's +// type. (This allows you to take a long like 123 and modify it to a string like +// **3. +type CharacterMaskConfig struct { + // Character to mask the sensitive values—for example, "*" for an + // alphabetic string such as name, or "0" for a numeric string such as ZIP + // code or credit card number. String must have length 1. If not supplied, we + // will default to "*" for strings, 0 for digits. + MaskingCharacter string `protobuf:"bytes,1,opt,name=masking_character,json=maskingCharacter" json:"masking_character,omitempty"` + // Number of characters to mask. If not set, all matching chars will be + // masked. Skipped characters do not count towards this tally. + NumberToMask int32 `protobuf:"varint,2,opt,name=number_to_mask,json=numberToMask" json:"number_to_mask,omitempty"` + // Mask characters in reverse order. 
For example, if `masking_character` is + // '0', number_to_mask is 14, and `reverse_order` is false, then + // 1234-5678-9012-3456 -> 00000000000000-3456 + // If `masking_character` is '*', `number_to_mask` is 3, and `reverse_order` + // is true, then 12345 -> 12*** + ReverseOrder bool `protobuf:"varint,3,opt,name=reverse_order,json=reverseOrder" json:"reverse_order,omitempty"` + // When masking a string, items in this list will be skipped when replacing. + // For example, if your string is 555-555-5555 and you ask us to skip `-` and + // mask 5 chars with * we would produce ***-*55-5555. + CharactersToIgnore []*CharsToIgnore `protobuf:"bytes,4,rep,name=characters_to_ignore,json=charactersToIgnore" json:"characters_to_ignore,omitempty"` +} + +func (m *CharacterMaskConfig) Reset() { *m = CharacterMaskConfig{} } +func (m *CharacterMaskConfig) String() string { return proto.CompactTextString(m) } +func (*CharacterMaskConfig) ProtoMessage() {} +func (*CharacterMaskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *CharacterMaskConfig) GetMaskingCharacter() string { + if m != nil { + return m.MaskingCharacter + } + return "" +} + +func (m *CharacterMaskConfig) GetNumberToMask() int32 { + if m != nil { + return m.NumberToMask + } + return 0 +} + +func (m *CharacterMaskConfig) GetReverseOrder() bool { + if m != nil { + return m.ReverseOrder + } + return false +} + +func (m *CharacterMaskConfig) GetCharactersToIgnore() []*CharsToIgnore { + if m != nil { + return m.CharactersToIgnore + } + return nil +} + +// Buckets values based on fixed size ranges. The +// Bucketing transformation can provide all of this functionality, +// but requires more configuration. This message is provided as a convenience to +// the user for simple bucketing strategies. +// +// The transformed value will be a hyphenated string of +// -, i.e if lower_bound = 10 and upper_bound = 20 +// all values that are within this bucket will be replaced with "10-20". 
+// +// This can be used on data of type: double, long. +// +// If the bound Value type differs from the type of data +// being transformed, we will first attempt converting the type of the data to +// be transformed to match the type of the bound before comparing. +type FixedSizeBucketingConfig struct { + // Lower bound value of buckets. All values less than `lower_bound` are + // grouped together into a single bucket; for example if `lower_bound` = 10, + // then all values less than 10 are replaced with the value “-10”. [Required]. + LowerBound *Value `protobuf:"bytes,1,opt,name=lower_bound,json=lowerBound" json:"lower_bound,omitempty"` + // Upper bound value of buckets. All values greater than upper_bound are + // grouped together into a single bucket; for example if `upper_bound` = 89, + // then all values greater than 89 are replaced with the value “89+”. + // [Required]. + UpperBound *Value `protobuf:"bytes,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + // Size of each bucket (except for minimum and maximum buckets). So if + // `lower_bound` = 10, `upper_bound` = 89, and `bucket_size` = 10, then the + // following buckets would be used: -10, 10-20, 20-30, 30-40, 40-50, 50-60, + // 60-70, 70-80, 80-89, 89+. Precision up to 2 decimals works. [Required]. 
+ BucketSize float64 `protobuf:"fixed64,3,opt,name=bucket_size,json=bucketSize" json:"bucket_size,omitempty"` +} + +func (m *FixedSizeBucketingConfig) Reset() { *m = FixedSizeBucketingConfig{} } +func (m *FixedSizeBucketingConfig) String() string { return proto.CompactTextString(m) } +func (*FixedSizeBucketingConfig) ProtoMessage() {} +func (*FixedSizeBucketingConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *FixedSizeBucketingConfig) GetLowerBound() *Value { + if m != nil { + return m.LowerBound + } + return nil +} + +func (m *FixedSizeBucketingConfig) GetUpperBound() *Value { + if m != nil { + return m.UpperBound + } + return nil +} + +func (m *FixedSizeBucketingConfig) GetBucketSize() float64 { + if m != nil { + return m.BucketSize + } + return 0 +} + +// Generalization function that buckets values based on ranges. The ranges and +// replacement values are dynamically provided by the user for custom behavior, +// such as 1-30 -> LOW 31-65 -> MEDIUM 66-100 -> HIGH +// This can be used on +// data of type: number, long, string, timestamp. +// If the bound `Value` type differs from the type of data being transformed, we +// will first attempt converting the type of the data to be transformed to match +// the type of the bound before comparing. +type BucketingConfig struct { + // Set of buckets. Ranges must be non-overlapping. + Buckets []*BucketingConfig_Bucket `protobuf:"bytes,1,rep,name=buckets" json:"buckets,omitempty"` +} + +func (m *BucketingConfig) Reset() { *m = BucketingConfig{} } +func (m *BucketingConfig) String() string { return proto.CompactTextString(m) } +func (*BucketingConfig) ProtoMessage() {} +func (*BucketingConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *BucketingConfig) GetBuckets() []*BucketingConfig_Bucket { + if m != nil { + return m.Buckets + } + return nil +} + +// Bucket is represented as a range, along with replacement values. 
+type BucketingConfig_Bucket struct { + // Lower bound of the range, inclusive. Type should be the same as max if + // used. + Min *Value `protobuf:"bytes,1,opt,name=min" json:"min,omitempty"` + // Upper bound of the range, exclusive; type must match min. + Max *Value `protobuf:"bytes,2,opt,name=max" json:"max,omitempty"` + // Replacement value for this bucket. If not provided + // the default behavior will be to hyphenate the min-max range. + ReplacementValue *Value `protobuf:"bytes,3,opt,name=replacement_value,json=replacementValue" json:"replacement_value,omitempty"` +} + +func (m *BucketingConfig_Bucket) Reset() { *m = BucketingConfig_Bucket{} } +func (m *BucketingConfig_Bucket) String() string { return proto.CompactTextString(m) } +func (*BucketingConfig_Bucket) ProtoMessage() {} +func (*BucketingConfig_Bucket) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46, 0} } + +func (m *BucketingConfig_Bucket) GetMin() *Value { + if m != nil { + return m.Min + } + return nil +} + +func (m *BucketingConfig_Bucket) GetMax() *Value { + if m != nil { + return m.Max + } + return nil +} + +func (m *BucketingConfig_Bucket) GetReplacementValue() *Value { + if m != nil { + return m.ReplacementValue + } + return nil +} + +// Replaces an identifier with a surrogate using FPE with the FFX +// mode of operation; however when used in the `ReidentifyContent` API method, +// it serves the opposite function by reversing the surrogate back into +// the original identifier. +// The identifier must be encoded as ASCII. +// For a given crypto key and context, the same identifier will be +// replaced with the same surrogate. +// Identifiers must be at least two characters long. +// In the case that the identifier is the empty string, it will be skipped. +// See [Pseudonymization](/dlp/docs/pseudonymization) for example usage. +type CryptoReplaceFfxFpeConfig struct { + // The key used by the encryption algorithm. 
[required] + CryptoKey *CryptoKey `protobuf:"bytes,1,opt,name=crypto_key,json=cryptoKey" json:"crypto_key,omitempty"` + // The 'tweak', a context may be used for higher security since the same + // identifier in two different contexts won't be given the same surrogate. If + // the context is not set, a default tweak will be used. + // + // If the context is set but: + // + // 1. there is no record present when transforming a given value or + // 1. the field is not present when transforming a given value, + // + // a default tweak will be used. + // + // Note that case (1) is expected when an `InfoTypeTransformation` is + // applied to both structured and non-structured `ContentItem`s. + // Currently, the referenced field may be of value type integer or string. + // + // The tweak is constructed as a sequence of bytes in big endian byte order + // such that: + // + // - a 64 bit integer is encoded followed by a single byte of value 1 + // - a string is encoded in UTF-8 format followed by a single byte of value + // 2 + Context *FieldId `protobuf:"bytes,2,opt,name=context" json:"context,omitempty"` + // Types that are valid to be assigned to Alphabet: + // *CryptoReplaceFfxFpeConfig_CommonAlphabet + // *CryptoReplaceFfxFpeConfig_CustomAlphabet + // *CryptoReplaceFfxFpeConfig_Radix + Alphabet isCryptoReplaceFfxFpeConfig_Alphabet `protobuf_oneof:"alphabet"` + // The custom infoType to annotate the surrogate with. + // This annotation will be applied to the surrogate by prefixing it with + // the name of the custom infoType followed by the number of + // characters comprising the surrogate.
The following scheme defines the + // format: info_type_name(surrogate_character_count):surrogate + // + // For example, if the name of custom infoType is 'MY_TOKEN_INFO_TYPE' and + // the surrogate is 'abc', the full replacement value + // will be: 'MY_TOKEN_INFO_TYPE(3):abc' + // + // This annotation identifies the surrogate when inspecting content using the + // custom infoType + // [`SurrogateType`](/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). + // This facilitates reversal of the surrogate when it occurs in free text. + // + // In order for inspection to work properly, the name of this infoType must + // not occur naturally anywhere in your data; otherwise, inspection may + // find a surrogate that does not correspond to an actual identifier. + // Therefore, choose your custom infoType name carefully after considering + // what your data looks like. One way to select a name that has a high chance + // of yielding reliable detection is to include one or more unicode characters + // that are highly improbable to exist in your data. 
+ // For example, assuming your data is entered from a regular ASCII keyboard, + // the symbol with the hex code point 29DD might be used like so: + // ⧝MY_TOKEN_TYPE + SurrogateInfoType *InfoType `protobuf:"bytes,8,opt,name=surrogate_info_type,json=surrogateInfoType" json:"surrogate_info_type,omitempty"` +} + +func (m *CryptoReplaceFfxFpeConfig) Reset() { *m = CryptoReplaceFfxFpeConfig{} } +func (m *CryptoReplaceFfxFpeConfig) String() string { return proto.CompactTextString(m) } +func (*CryptoReplaceFfxFpeConfig) ProtoMessage() {} +func (*CryptoReplaceFfxFpeConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +type isCryptoReplaceFfxFpeConfig_Alphabet interface { + isCryptoReplaceFfxFpeConfig_Alphabet() +} + +type CryptoReplaceFfxFpeConfig_CommonAlphabet struct { + CommonAlphabet CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet `protobuf:"varint,4,opt,name=common_alphabet,json=commonAlphabet,enum=google.privacy.dlp.v2.CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet,oneof"` +} +type CryptoReplaceFfxFpeConfig_CustomAlphabet struct { + CustomAlphabet string `protobuf:"bytes,5,opt,name=custom_alphabet,json=customAlphabet,oneof"` +} +type CryptoReplaceFfxFpeConfig_Radix struct { + Radix int32 `protobuf:"varint,6,opt,name=radix,oneof"` +} + +func (*CryptoReplaceFfxFpeConfig_CommonAlphabet) isCryptoReplaceFfxFpeConfig_Alphabet() {} +func (*CryptoReplaceFfxFpeConfig_CustomAlphabet) isCryptoReplaceFfxFpeConfig_Alphabet() {} +func (*CryptoReplaceFfxFpeConfig_Radix) isCryptoReplaceFfxFpeConfig_Alphabet() {} + +func (m *CryptoReplaceFfxFpeConfig) GetAlphabet() isCryptoReplaceFfxFpeConfig_Alphabet { + if m != nil { + return m.Alphabet + } + return nil +} + +func (m *CryptoReplaceFfxFpeConfig) GetCryptoKey() *CryptoKey { + if m != nil { + return m.CryptoKey + } + return nil +} + +func (m *CryptoReplaceFfxFpeConfig) GetContext() *FieldId { + if m != nil { + return m.Context + } + return nil +} + +func (m *CryptoReplaceFfxFpeConfig) GetCommonAlphabet() 
CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet { + if x, ok := m.GetAlphabet().(*CryptoReplaceFfxFpeConfig_CommonAlphabet); ok { + return x.CommonAlphabet + } + return CryptoReplaceFfxFpeConfig_FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED +} + +func (m *CryptoReplaceFfxFpeConfig) GetCustomAlphabet() string { + if x, ok := m.GetAlphabet().(*CryptoReplaceFfxFpeConfig_CustomAlphabet); ok { + return x.CustomAlphabet + } + return "" +} + +func (m *CryptoReplaceFfxFpeConfig) GetRadix() int32 { + if x, ok := m.GetAlphabet().(*CryptoReplaceFfxFpeConfig_Radix); ok { + return x.Radix + } + return 0 +} + +func (m *CryptoReplaceFfxFpeConfig) GetSurrogateInfoType() *InfoType { + if m != nil { + return m.SurrogateInfoType + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CryptoReplaceFfxFpeConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CryptoReplaceFfxFpeConfig_OneofMarshaler, _CryptoReplaceFfxFpeConfig_OneofUnmarshaler, _CryptoReplaceFfxFpeConfig_OneofSizer, []interface{}{ + (*CryptoReplaceFfxFpeConfig_CommonAlphabet)(nil), + (*CryptoReplaceFfxFpeConfig_CustomAlphabet)(nil), + (*CryptoReplaceFfxFpeConfig_Radix)(nil), + } +} + +func _CryptoReplaceFfxFpeConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CryptoReplaceFfxFpeConfig) + // alphabet + switch x := m.Alphabet.(type) { + case *CryptoReplaceFfxFpeConfig_CommonAlphabet: + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.CommonAlphabet)) + case *CryptoReplaceFfxFpeConfig_CustomAlphabet: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.CustomAlphabet) + case *CryptoReplaceFfxFpeConfig_Radix: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Radix)) + case nil: + default: + return fmt.Errorf("CryptoReplaceFfxFpeConfig.Alphabet has unexpected type %T", 
x) + } + return nil +} + +func _CryptoReplaceFfxFpeConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CryptoReplaceFfxFpeConfig) + switch tag { + case 4: // alphabet.common_alphabet + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Alphabet = &CryptoReplaceFfxFpeConfig_CommonAlphabet{CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet(x)} + return true, err + case 5: // alphabet.custom_alphabet + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Alphabet = &CryptoReplaceFfxFpeConfig_CustomAlphabet{x} + return true, err + case 6: // alphabet.radix + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Alphabet = &CryptoReplaceFfxFpeConfig_Radix{int32(x)} + return true, err + default: + return false, nil + } +} + +func _CryptoReplaceFfxFpeConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CryptoReplaceFfxFpeConfig) + // alphabet + switch x := m.Alphabet.(type) { + case *CryptoReplaceFfxFpeConfig_CommonAlphabet: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.CommonAlphabet)) + case *CryptoReplaceFfxFpeConfig_CustomAlphabet: + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.CustomAlphabet))) + n += len(x.CustomAlphabet) + case *CryptoReplaceFfxFpeConfig_Radix: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Radix)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// This is a data encryption key (DEK) (as opposed to +// a key encryption key (KEK) stored by KMS). +// When using KMS to wrap/unwrap DEKs, be sure to set an appropriate +// IAM policy on the KMS CryptoKey (KEK) to ensure an attacker cannot +// unwrap the data crypto key. 
+type CryptoKey struct { + // Types that are valid to be assigned to Source: + // *CryptoKey_Transient + // *CryptoKey_Unwrapped + // *CryptoKey_KmsWrapped + Source isCryptoKey_Source `protobuf_oneof:"source"` +} + +func (m *CryptoKey) Reset() { *m = CryptoKey{} } +func (m *CryptoKey) String() string { return proto.CompactTextString(m) } +func (*CryptoKey) ProtoMessage() {} +func (*CryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isCryptoKey_Source interface { + isCryptoKey_Source() +} + +type CryptoKey_Transient struct { + Transient *TransientCryptoKey `protobuf:"bytes,1,opt,name=transient,oneof"` +} +type CryptoKey_Unwrapped struct { + Unwrapped *UnwrappedCryptoKey `protobuf:"bytes,2,opt,name=unwrapped,oneof"` +} +type CryptoKey_KmsWrapped struct { + KmsWrapped *KmsWrappedCryptoKey `protobuf:"bytes,3,opt,name=kms_wrapped,json=kmsWrapped,oneof"` +} + +func (*CryptoKey_Transient) isCryptoKey_Source() {} +func (*CryptoKey_Unwrapped) isCryptoKey_Source() {} +func (*CryptoKey_KmsWrapped) isCryptoKey_Source() {} + +func (m *CryptoKey) GetSource() isCryptoKey_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *CryptoKey) GetTransient() *TransientCryptoKey { + if x, ok := m.GetSource().(*CryptoKey_Transient); ok { + return x.Transient + } + return nil +} + +func (m *CryptoKey) GetUnwrapped() *UnwrappedCryptoKey { + if x, ok := m.GetSource().(*CryptoKey_Unwrapped); ok { + return x.Unwrapped + } + return nil +} + +func (m *CryptoKey) GetKmsWrapped() *KmsWrappedCryptoKey { + if x, ok := m.GetSource().(*CryptoKey_KmsWrapped); ok { + return x.KmsWrapped + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CryptoKey) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CryptoKey_OneofMarshaler, _CryptoKey_OneofUnmarshaler, _CryptoKey_OneofSizer, []interface{}{ + (*CryptoKey_Transient)(nil), + (*CryptoKey_Unwrapped)(nil), + (*CryptoKey_KmsWrapped)(nil), + } +} + +func _CryptoKey_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CryptoKey) + // source + switch x := m.Source.(type) { + case *CryptoKey_Transient: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Transient); err != nil { + return err + } + case *CryptoKey_Unwrapped: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Unwrapped); err != nil { + return err + } + case *CryptoKey_KmsWrapped: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.KmsWrapped); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CryptoKey.Source has unexpected type %T", x) + } + return nil +} + +func _CryptoKey_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CryptoKey) + switch tag { + case 1: // source.transient + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransientCryptoKey) + err := b.DecodeMessage(msg) + m.Source = &CryptoKey_Transient{msg} + return true, err + case 2: // source.unwrapped + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UnwrappedCryptoKey) + err := b.DecodeMessage(msg) + m.Source = &CryptoKey_Unwrapped{msg} + return true, err + case 3: // source.kms_wrapped + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(KmsWrappedCryptoKey) + err := b.DecodeMessage(msg) + m.Source = &CryptoKey_KmsWrapped{msg} + return true, err + default: + return false, nil + } +} + +func 
_CryptoKey_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CryptoKey) + // source + switch x := m.Source.(type) { + case *CryptoKey_Transient: + s := proto.Size(x.Transient) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *CryptoKey_Unwrapped: + s := proto.Size(x.Unwrapped) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *CryptoKey_KmsWrapped: + s := proto.Size(x.KmsWrapped) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Use this to have a random data crypto key generated. +// It will be discarded after the request finishes. +type TransientCryptoKey struct { + // Name of the key. [required] + // This is an arbitrary string used to differentiate different keys. + // A unique key is generated per name: two separate `TransientCryptoKey` + // protos share the same generated key if their names are the same. + // When the data crypto key is generated, this name is not used in any way + // (repeating the api call will result in a different key being generated). + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *TransientCryptoKey) Reset() { *m = TransientCryptoKey{} } +func (m *TransientCryptoKey) String() string { return proto.CompactTextString(m) } +func (*TransientCryptoKey) ProtoMessage() {} +func (*TransientCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *TransientCryptoKey) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Using raw keys is prone to security risks due to accidentally +// leaking the key. Choose another type of key if possible. +type UnwrappedCryptoKey struct { + // The AES 128/192/256 bit key. 
[required] + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *UnwrappedCryptoKey) Reset() { *m = UnwrappedCryptoKey{} } +func (m *UnwrappedCryptoKey) String() string { return proto.CompactTextString(m) } +func (*UnwrappedCryptoKey) ProtoMessage() {} +func (*UnwrappedCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *UnwrappedCryptoKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +// Include to use an existing data crypto key wrapped by KMS. +// Authorization requires the following IAM permissions when sending a request +// to perform a crypto transformation using a kms-wrapped crypto key: +// dlp.kms.encrypt +type KmsWrappedCryptoKey struct { + // The wrapped data crypto key. [required] + WrappedKey []byte `protobuf:"bytes,1,opt,name=wrapped_key,json=wrappedKey,proto3" json:"wrapped_key,omitempty"` + // The resource name of the KMS CryptoKey to use for unwrapping. [required] + CryptoKeyName string `protobuf:"bytes,2,opt,name=crypto_key_name,json=cryptoKeyName" json:"crypto_key_name,omitempty"` +} + +func (m *KmsWrappedCryptoKey) Reset() { *m = KmsWrappedCryptoKey{} } +func (m *KmsWrappedCryptoKey) String() string { return proto.CompactTextString(m) } +func (*KmsWrappedCryptoKey) ProtoMessage() {} +func (*KmsWrappedCryptoKey) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +func (m *KmsWrappedCryptoKey) GetWrappedKey() []byte { + if m != nil { + return m.WrappedKey + } + return nil +} + +func (m *KmsWrappedCryptoKey) GetCryptoKeyName() string { + if m != nil { + return m.CryptoKeyName + } + return "" +} + +// Shifts dates by random number of days, with option to be consistent for the +// same context. +type DateShiftConfig struct { + // Range of shift in days. Actual shift will be selected at random within this + // range (inclusive ends). Negative means shift to earlier in time. 
Must not + // be more than 365250 days (1000 years) each direction. + // + // For example, 3 means shift date to at most 3 days into the future. + // [Required] + UpperBoundDays int32 `protobuf:"varint,1,opt,name=upper_bound_days,json=upperBoundDays" json:"upper_bound_days,omitempty"` + // For example, -5 means shift date to at most 5 days back in the past. + // [Required] + LowerBoundDays int32 `protobuf:"varint,2,opt,name=lower_bound_days,json=lowerBoundDays" json:"lower_bound_days,omitempty"` + // Points to the field that contains the context, for example, an entity id. + // If set, must also set method. If set, shift will be consistent for the + // given context. + Context *FieldId `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + // Method for calculating shift that takes context into consideration. If + // set, must also set context. Can only be applied to table items. + // + // Types that are valid to be assigned to Method: + // *DateShiftConfig_CryptoKey + Method isDateShiftConfig_Method `protobuf_oneof:"method"` +} + +func (m *DateShiftConfig) Reset() { *m = DateShiftConfig{} } +func (m *DateShiftConfig) String() string { return proto.CompactTextString(m) } +func (*DateShiftConfig) ProtoMessage() {} +func (*DateShiftConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +type isDateShiftConfig_Method interface { + isDateShiftConfig_Method() +} + +type DateShiftConfig_CryptoKey struct { + CryptoKey *CryptoKey `protobuf:"bytes,4,opt,name=crypto_key,json=cryptoKey,oneof"` +} + +func (*DateShiftConfig_CryptoKey) isDateShiftConfig_Method() {} + +func (m *DateShiftConfig) GetMethod() isDateShiftConfig_Method { + if m != nil { + return m.Method + } + return nil +} + +func (m *DateShiftConfig) GetUpperBoundDays() int32 { + if m != nil { + return m.UpperBoundDays + } + return 0 +} + +func (m *DateShiftConfig) GetLowerBoundDays() int32 { + if m != nil { + return m.LowerBoundDays + } + return 0 +} + +func (m *DateShiftConfig) 
GetContext() *FieldId { + if m != nil { + return m.Context + } + return nil +} + +func (m *DateShiftConfig) GetCryptoKey() *CryptoKey { + if x, ok := m.GetMethod().(*DateShiftConfig_CryptoKey); ok { + return x.CryptoKey + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*DateShiftConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DateShiftConfig_OneofMarshaler, _DateShiftConfig_OneofUnmarshaler, _DateShiftConfig_OneofSizer, []interface{}{ + (*DateShiftConfig_CryptoKey)(nil), + } +} + +func _DateShiftConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DateShiftConfig) + // method + switch x := m.Method.(type) { + case *DateShiftConfig_CryptoKey: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CryptoKey); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DateShiftConfig.Method has unexpected type %T", x) + } + return nil +} + +func _DateShiftConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DateShiftConfig) + switch tag { + case 4: // method.crypto_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CryptoKey) + err := b.DecodeMessage(msg) + m.Method = &DateShiftConfig_CryptoKey{msg} + return true, err + default: + return false, nil + } +} + +func _DateShiftConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DateShiftConfig) + // method + switch x := m.Method.(type) { + case *DateShiftConfig_CryptoKey: + s := proto.Size(x.CryptoKey) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A type of transformation that will scan unstructured text and +// 
apply various `PrimitiveTransformation`s to each finding, where the +// transformation is applied to only values that were identified as a specific +// info_type. +type InfoTypeTransformations struct { + // Transformation for each infoType. Cannot specify more than one + // for a given infoType. [required] + Transformations []*InfoTypeTransformations_InfoTypeTransformation `protobuf:"bytes,1,rep,name=transformations" json:"transformations,omitempty"` +} + +func (m *InfoTypeTransformations) Reset() { *m = InfoTypeTransformations{} } +func (m *InfoTypeTransformations) String() string { return proto.CompactTextString(m) } +func (*InfoTypeTransformations) ProtoMessage() {} +func (*InfoTypeTransformations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +func (m *InfoTypeTransformations) GetTransformations() []*InfoTypeTransformations_InfoTypeTransformation { + if m != nil { + return m.Transformations + } + return nil +} + +// A transformation to apply to text that is identified as a specific +// info_type. +type InfoTypeTransformations_InfoTypeTransformation struct { + // InfoTypes to apply the transformation to. Empty list will match all + // available infoTypes for this transformation. + InfoTypes []*InfoType `protobuf:"bytes,1,rep,name=info_types,json=infoTypes" json:"info_types,omitempty"` + // Primitive transformation to apply to the infoType. 
[required] + PrimitiveTransformation *PrimitiveTransformation `protobuf:"bytes,2,opt,name=primitive_transformation,json=primitiveTransformation" json:"primitive_transformation,omitempty"` +} + +func (m *InfoTypeTransformations_InfoTypeTransformation) Reset() { + *m = InfoTypeTransformations_InfoTypeTransformation{} +} +func (m *InfoTypeTransformations_InfoTypeTransformation) String() string { + return proto.CompactTextString(m) +} +func (*InfoTypeTransformations_InfoTypeTransformation) ProtoMessage() {} +func (*InfoTypeTransformations_InfoTypeTransformation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{53, 0} +} + +func (m *InfoTypeTransformations_InfoTypeTransformation) GetInfoTypes() []*InfoType { + if m != nil { + return m.InfoTypes + } + return nil +} + +func (m *InfoTypeTransformations_InfoTypeTransformation) GetPrimitiveTransformation() *PrimitiveTransformation { + if m != nil { + return m.PrimitiveTransformation + } + return nil +} + +// The transformation to apply to the field. +type FieldTransformation struct { + // Input field(s) to apply the transformation to. [required] + Fields []*FieldId `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` + // Only apply the transformation if the condition evaluates to true for the + // given `RecordCondition`. The conditions are allowed to reference fields + // that are not used in the actual transformation. [optional] + // + // Example Use Cases: + // + // - Apply a different bucket transformation to an age column if the zip code + // column for the same record is within a specific range. + // - Redact a field if the date of birth field is greater than 85. + Condition *RecordCondition `protobuf:"bytes,3,opt,name=condition" json:"condition,omitempty"` + // Transformation to apply. 
[required] + // + // Types that are valid to be assigned to Transformation: + // *FieldTransformation_PrimitiveTransformation + // *FieldTransformation_InfoTypeTransformations + Transformation isFieldTransformation_Transformation `protobuf_oneof:"transformation"` +} + +func (m *FieldTransformation) Reset() { *m = FieldTransformation{} } +func (m *FieldTransformation) String() string { return proto.CompactTextString(m) } +func (*FieldTransformation) ProtoMessage() {} +func (*FieldTransformation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +type isFieldTransformation_Transformation interface { + isFieldTransformation_Transformation() +} + +type FieldTransformation_PrimitiveTransformation struct { + PrimitiveTransformation *PrimitiveTransformation `protobuf:"bytes,4,opt,name=primitive_transformation,json=primitiveTransformation,oneof"` +} +type FieldTransformation_InfoTypeTransformations struct { + InfoTypeTransformations *InfoTypeTransformations `protobuf:"bytes,5,opt,name=info_type_transformations,json=infoTypeTransformations,oneof"` +} + +func (*FieldTransformation_PrimitiveTransformation) isFieldTransformation_Transformation() {} +func (*FieldTransformation_InfoTypeTransformations) isFieldTransformation_Transformation() {} + +func (m *FieldTransformation) GetTransformation() isFieldTransformation_Transformation { + if m != nil { + return m.Transformation + } + return nil +} + +func (m *FieldTransformation) GetFields() []*FieldId { + if m != nil { + return m.Fields + } + return nil +} + +func (m *FieldTransformation) GetCondition() *RecordCondition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *FieldTransformation) GetPrimitiveTransformation() *PrimitiveTransformation { + if x, ok := m.GetTransformation().(*FieldTransformation_PrimitiveTransformation); ok { + return x.PrimitiveTransformation + } + return nil +} + +func (m *FieldTransformation) GetInfoTypeTransformations() *InfoTypeTransformations { + if x, ok := 
m.GetTransformation().(*FieldTransformation_InfoTypeTransformations); ok { + return x.InfoTypeTransformations + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*FieldTransformation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _FieldTransformation_OneofMarshaler, _FieldTransformation_OneofUnmarshaler, _FieldTransformation_OneofSizer, []interface{}{ + (*FieldTransformation_PrimitiveTransformation)(nil), + (*FieldTransformation_InfoTypeTransformations)(nil), + } +} + +func _FieldTransformation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*FieldTransformation) + // transformation + switch x := m.Transformation.(type) { + case *FieldTransformation_PrimitiveTransformation: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PrimitiveTransformation); err != nil { + return err + } + case *FieldTransformation_InfoTypeTransformations: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InfoTypeTransformations); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("FieldTransformation.Transformation has unexpected type %T", x) + } + return nil +} + +func _FieldTransformation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*FieldTransformation) + switch tag { + case 4: // transformation.primitive_transformation + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PrimitiveTransformation) + err := b.DecodeMessage(msg) + m.Transformation = &FieldTransformation_PrimitiveTransformation{msg} + return true, err + case 5: // transformation.info_type_transformations + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InfoTypeTransformations) + err := b.DecodeMessage(msg) + 
m.Transformation = &FieldTransformation_InfoTypeTransformations{msg} + return true, err + default: + return false, nil + } +} + +func _FieldTransformation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*FieldTransformation) + // transformation + switch x := m.Transformation.(type) { + case *FieldTransformation_PrimitiveTransformation: + s := proto.Size(x.PrimitiveTransformation) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *FieldTransformation_InfoTypeTransformations: + s := proto.Size(x.InfoTypeTransformations) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A type of transformation that is applied over structured data such as a +// table. +type RecordTransformations struct { + // Transform the record by applying various field transformations. + FieldTransformations []*FieldTransformation `protobuf:"bytes,1,rep,name=field_transformations,json=fieldTransformations" json:"field_transformations,omitempty"` + // Configuration defining which records get suppressed entirely. Records that + // match any suppression rule are omitted from the output [optional]. 
+ RecordSuppressions []*RecordSuppression `protobuf:"bytes,2,rep,name=record_suppressions,json=recordSuppressions" json:"record_suppressions,omitempty"` +} + +func (m *RecordTransformations) Reset() { *m = RecordTransformations{} } +func (m *RecordTransformations) String() string { return proto.CompactTextString(m) } +func (*RecordTransformations) ProtoMessage() {} +func (*RecordTransformations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *RecordTransformations) GetFieldTransformations() []*FieldTransformation { + if m != nil { + return m.FieldTransformations + } + return nil +} + +func (m *RecordTransformations) GetRecordSuppressions() []*RecordSuppression { + if m != nil { + return m.RecordSuppressions + } + return nil +} + +// Configuration to suppress records whose suppression conditions evaluate to +// true. +type RecordSuppression struct { + // A condition that when it evaluates to true will result in the record being + // evaluated to be suppressed from the transformed content. + Condition *RecordCondition `protobuf:"bytes,1,opt,name=condition" json:"condition,omitempty"` +} + +func (m *RecordSuppression) Reset() { *m = RecordSuppression{} } +func (m *RecordSuppression) String() string { return proto.CompactTextString(m) } +func (*RecordSuppression) ProtoMessage() {} +func (*RecordSuppression) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *RecordSuppression) GetCondition() *RecordCondition { + if m != nil { + return m.Condition + } + return nil +} + +// A condition for determining whether a transformation should be applied to +// a field. +type RecordCondition struct { + // An expression. 
+ Expressions *RecordCondition_Expressions `protobuf:"bytes,3,opt,name=expressions" json:"expressions,omitempty"` +} + +func (m *RecordCondition) Reset() { *m = RecordCondition{} } +func (m *RecordCondition) String() string { return proto.CompactTextString(m) } +func (*RecordCondition) ProtoMessage() {} +func (*RecordCondition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *RecordCondition) GetExpressions() *RecordCondition_Expressions { + if m != nil { + return m.Expressions + } + return nil +} + +// The field type of `value` and `field` do not need to match to be +// considered equal, but not all comparisons are possible. +// +// A `value` of type: +// +// - `string` can be compared against all other types +// - `boolean` can only be compared against other booleans +// - `integer` can be compared against doubles or a string if the string value +// can be parsed as an integer. +// - `double` can be compared against integers or a string if the string can +// be parsed as a double. +// - `Timestamp` can be compared against strings in RFC 3339 date string +// format. +// - `TimeOfDay` can be compared against timestamps and strings in the format +// of 'HH:mm:ss'. +// +// If we fail to compare do to type mismatch, a warning will be given and +// the condition will evaluate to false. +type RecordCondition_Condition struct { + // Field within the record this condition is evaluated against. [required] + Field *FieldId `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` + // Operator used to compare the field or infoType to the value. [required] + Operator RelationalOperator `protobuf:"varint,3,opt,name=operator,enum=google.privacy.dlp.v2.RelationalOperator" json:"operator,omitempty"` + // Value to compare against. [Required, except for `EXISTS` tests.] 
+ Value *Value `protobuf:"bytes,4,opt,name=value" json:"value,omitempty"` +} + +func (m *RecordCondition_Condition) Reset() { *m = RecordCondition_Condition{} } +func (m *RecordCondition_Condition) String() string { return proto.CompactTextString(m) } +func (*RecordCondition_Condition) ProtoMessage() {} +func (*RecordCondition_Condition) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57, 0} } + +func (m *RecordCondition_Condition) GetField() *FieldId { + if m != nil { + return m.Field + } + return nil +} + +func (m *RecordCondition_Condition) GetOperator() RelationalOperator { + if m != nil { + return m.Operator + } + return RelationalOperator_RELATIONAL_OPERATOR_UNSPECIFIED +} + +func (m *RecordCondition_Condition) GetValue() *Value { + if m != nil { + return m.Value + } + return nil +} + +// A collection of conditions. +type RecordCondition_Conditions struct { + Conditions []*RecordCondition_Condition `protobuf:"bytes,1,rep,name=conditions" json:"conditions,omitempty"` +} + +func (m *RecordCondition_Conditions) Reset() { *m = RecordCondition_Conditions{} } +func (m *RecordCondition_Conditions) String() string { return proto.CompactTextString(m) } +func (*RecordCondition_Conditions) ProtoMessage() {} +func (*RecordCondition_Conditions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57, 1} } + +func (m *RecordCondition_Conditions) GetConditions() []*RecordCondition_Condition { + if m != nil { + return m.Conditions + } + return nil +} + +// An expression, consisting or an operator and conditions. +type RecordCondition_Expressions struct { + // The operator to apply to the result of conditions. Default and currently + // only supported value is `AND`. 
+ LogicalOperator RecordCondition_Expressions_LogicalOperator `protobuf:"varint,1,opt,name=logical_operator,json=logicalOperator,enum=google.privacy.dlp.v2.RecordCondition_Expressions_LogicalOperator" json:"logical_operator,omitempty"` + // Types that are valid to be assigned to Type: + // *RecordCondition_Expressions_Conditions + Type isRecordCondition_Expressions_Type `protobuf_oneof:"type"` +} + +func (m *RecordCondition_Expressions) Reset() { *m = RecordCondition_Expressions{} } +func (m *RecordCondition_Expressions) String() string { return proto.CompactTextString(m) } +func (*RecordCondition_Expressions) ProtoMessage() {} +func (*RecordCondition_Expressions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57, 2} } + +type isRecordCondition_Expressions_Type interface { + isRecordCondition_Expressions_Type() +} + +type RecordCondition_Expressions_Conditions struct { + Conditions *RecordCondition_Conditions `protobuf:"bytes,3,opt,name=conditions,oneof"` +} + +func (*RecordCondition_Expressions_Conditions) isRecordCondition_Expressions_Type() {} + +func (m *RecordCondition_Expressions) GetType() isRecordCondition_Expressions_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *RecordCondition_Expressions) GetLogicalOperator() RecordCondition_Expressions_LogicalOperator { + if m != nil { + return m.LogicalOperator + } + return RecordCondition_Expressions_LOGICAL_OPERATOR_UNSPECIFIED +} + +func (m *RecordCondition_Expressions) GetConditions() *RecordCondition_Conditions { + if x, ok := m.GetType().(*RecordCondition_Expressions_Conditions); ok { + return x.Conditions + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RecordCondition_Expressions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RecordCondition_Expressions_OneofMarshaler, _RecordCondition_Expressions_OneofUnmarshaler, _RecordCondition_Expressions_OneofSizer, []interface{}{ + (*RecordCondition_Expressions_Conditions)(nil), + } +} + +func _RecordCondition_Expressions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RecordCondition_Expressions) + // type + switch x := m.Type.(type) { + case *RecordCondition_Expressions_Conditions: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Conditions); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RecordCondition_Expressions.Type has unexpected type %T", x) + } + return nil +} + +func _RecordCondition_Expressions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RecordCondition_Expressions) + switch tag { + case 3: // type.conditions + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RecordCondition_Conditions) + err := b.DecodeMessage(msg) + m.Type = &RecordCondition_Expressions_Conditions{msg} + return true, err + default: + return false, nil + } +} + +func _RecordCondition_Expressions_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RecordCondition_Expressions) + // type + switch x := m.Type.(type) { + case *RecordCondition_Expressions_Conditions: + s := proto.Size(x.Conditions) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Overview of the modifications that occurred. +type TransformationOverview struct { + // Total size in bytes that were transformed in some way. 
+ TransformedBytes int64 `protobuf:"varint,2,opt,name=transformed_bytes,json=transformedBytes" json:"transformed_bytes,omitempty"` + // Transformations applied to the dataset. + TransformationSummaries []*TransformationSummary `protobuf:"bytes,3,rep,name=transformation_summaries,json=transformationSummaries" json:"transformation_summaries,omitempty"` +} + +func (m *TransformationOverview) Reset() { *m = TransformationOverview{} } +func (m *TransformationOverview) String() string { return proto.CompactTextString(m) } +func (*TransformationOverview) ProtoMessage() {} +func (*TransformationOverview) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *TransformationOverview) GetTransformedBytes() int64 { + if m != nil { + return m.TransformedBytes + } + return 0 +} + +func (m *TransformationOverview) GetTransformationSummaries() []*TransformationSummary { + if m != nil { + return m.TransformationSummaries + } + return nil +} + +// Summary of a single tranformation. +// Only one of 'transformation', 'field_transformation', or 'record_suppress' +// will be set. +type TransformationSummary struct { + // Set if the transformation was limited to a specific info_type. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Set if the transformation was limited to a specific FieldId. + Field *FieldId `protobuf:"bytes,2,opt,name=field" json:"field,omitempty"` + // The specific transformation these stats apply to. + Transformation *PrimitiveTransformation `protobuf:"bytes,3,opt,name=transformation" json:"transformation,omitempty"` + // The field transformation that was applied. + // If multiple field transformations are requested for a single field, + // this list will contain all of them; otherwise, only one is supplied. 
+ FieldTransformations []*FieldTransformation `protobuf:"bytes,5,rep,name=field_transformations,json=fieldTransformations" json:"field_transformations,omitempty"` + // The specific suppression option these stats apply to. + RecordSuppress *RecordSuppression `protobuf:"bytes,6,opt,name=record_suppress,json=recordSuppress" json:"record_suppress,omitempty"` + Results []*TransformationSummary_SummaryResult `protobuf:"bytes,4,rep,name=results" json:"results,omitempty"` + // Total size in bytes that were transformed in some way. + TransformedBytes int64 `protobuf:"varint,7,opt,name=transformed_bytes,json=transformedBytes" json:"transformed_bytes,omitempty"` +} + +func (m *TransformationSummary) Reset() { *m = TransformationSummary{} } +func (m *TransformationSummary) String() string { return proto.CompactTextString(m) } +func (*TransformationSummary) ProtoMessage() {} +func (*TransformationSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *TransformationSummary) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *TransformationSummary) GetField() *FieldId { + if m != nil { + return m.Field + } + return nil +} + +func (m *TransformationSummary) GetTransformation() *PrimitiveTransformation { + if m != nil { + return m.Transformation + } + return nil +} + +func (m *TransformationSummary) GetFieldTransformations() []*FieldTransformation { + if m != nil { + return m.FieldTransformations + } + return nil +} + +func (m *TransformationSummary) GetRecordSuppress() *RecordSuppression { + if m != nil { + return m.RecordSuppress + } + return nil +} + +func (m *TransformationSummary) GetResults() []*TransformationSummary_SummaryResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *TransformationSummary) GetTransformedBytes() int64 { + if m != nil { + return m.TransformedBytes + } + return 0 +} + +// A collection that informs the user the number of times a particular +// 
`TransformationResultCode` and error details occurred. +type TransformationSummary_SummaryResult struct { + Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + Code TransformationSummary_TransformationResultCode `protobuf:"varint,2,opt,name=code,enum=google.privacy.dlp.v2.TransformationSummary_TransformationResultCode" json:"code,omitempty"` + // A place for warnings or errors to show up if a transformation didn't + // work as expected. + Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"` +} + +func (m *TransformationSummary_SummaryResult) Reset() { *m = TransformationSummary_SummaryResult{} } +func (m *TransformationSummary_SummaryResult) String() string { return proto.CompactTextString(m) } +func (*TransformationSummary_SummaryResult) ProtoMessage() {} +func (*TransformationSummary_SummaryResult) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{59, 0} +} + +func (m *TransformationSummary_SummaryResult) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *TransformationSummary_SummaryResult) GetCode() TransformationSummary_TransformationResultCode { + if m != nil { + return m.Code + } + return TransformationSummary_TRANSFORMATION_RESULT_CODE_UNSPECIFIED +} + +func (m *TransformationSummary_SummaryResult) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +// Schedule for triggeredJobs. 
+type Schedule struct { + // Types that are valid to be assigned to Option: + // *Schedule_RecurrencePeriodDuration + Option isSchedule_Option `protobuf_oneof:"option"` +} + +func (m *Schedule) Reset() { *m = Schedule{} } +func (m *Schedule) String() string { return proto.CompactTextString(m) } +func (*Schedule) ProtoMessage() {} +func (*Schedule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } + +type isSchedule_Option interface { + isSchedule_Option() +} + +type Schedule_RecurrencePeriodDuration struct { + RecurrencePeriodDuration *google_protobuf2.Duration `protobuf:"bytes,1,opt,name=recurrence_period_duration,json=recurrencePeriodDuration,oneof"` +} + +func (*Schedule_RecurrencePeriodDuration) isSchedule_Option() {} + +func (m *Schedule) GetOption() isSchedule_Option { + if m != nil { + return m.Option + } + return nil +} + +func (m *Schedule) GetRecurrencePeriodDuration() *google_protobuf2.Duration { + if x, ok := m.GetOption().(*Schedule_RecurrencePeriodDuration); ok { + return x.RecurrencePeriodDuration + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Schedule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Schedule_OneofMarshaler, _Schedule_OneofUnmarshaler, _Schedule_OneofSizer, []interface{}{ + (*Schedule_RecurrencePeriodDuration)(nil), + } +} + +func _Schedule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Schedule) + // option + switch x := m.Option.(type) { + case *Schedule_RecurrencePeriodDuration: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RecurrencePeriodDuration); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Schedule.Option has unexpected type %T", x) + } + return nil +} + +func _Schedule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Schedule) + switch tag { + case 1: // option.recurrence_period_duration + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf2.Duration) + err := b.DecodeMessage(msg) + m.Option = &Schedule_RecurrencePeriodDuration{msg} + return true, err + default: + return false, nil + } +} + +func _Schedule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Schedule) + // option + switch x := m.Option.(type) { + case *Schedule_RecurrencePeriodDuration: + s := proto.Size(x.RecurrencePeriodDuration) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The inspectTemplate contains a configuration (set of types of sensitive data +// to be detected) to be used anywhere you otherwise would normally specify +// InspectConfig. +type InspectTemplate struct { + // The template name. Output only. 
+ // + // The template will have one of the following formats: + // `projects/PROJECT_ID/inspectTemplates/TEMPLATE_ID` OR + // `organizations/ORGANIZATION_ID/inspectTemplates/TEMPLATE_ID` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Display name (max 256 chars). + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Short description (max 256 chars). + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The creation timestamp of a inspectTemplate, output only field. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The last update timestamp of a inspectTemplate, output only field. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // The core content of the template. Configuration of the scanning process. 
+ InspectConfig *InspectConfig `protobuf:"bytes,6,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` +} + +func (m *InspectTemplate) Reset() { *m = InspectTemplate{} } +func (m *InspectTemplate) String() string { return proto.CompactTextString(m) } +func (*InspectTemplate) ProtoMessage() {} +func (*InspectTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } + +func (m *InspectTemplate) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InspectTemplate) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *InspectTemplate) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *InspectTemplate) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *InspectTemplate) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *InspectTemplate) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +// The DeidentifyTemplates contains instructions on how to deidentify content. +type DeidentifyTemplate struct { + // The template name. Output only. + // + // The template will have one of the following formats: + // `projects/PROJECT_ID/deidentifyTemplates/TEMPLATE_ID` OR + // `organizations/ORGANIZATION_ID/deidentifyTemplates/TEMPLATE_ID` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Display name (max 256 chars). + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // Short description (max 256 chars). + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The creation timestamp of a inspectTemplate, output only field. 
+ CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The last update timestamp of a inspectTemplate, output only field. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // ///////////// // The core content of the template // /////////////// + DeidentifyConfig *DeidentifyConfig `protobuf:"bytes,6,opt,name=deidentify_config,json=deidentifyConfig" json:"deidentify_config,omitempty"` +} + +func (m *DeidentifyTemplate) Reset() { *m = DeidentifyTemplate{} } +func (m *DeidentifyTemplate) String() string { return proto.CompactTextString(m) } +func (*DeidentifyTemplate) ProtoMessage() {} +func (*DeidentifyTemplate) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} } + +func (m *DeidentifyTemplate) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeidentifyTemplate) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *DeidentifyTemplate) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *DeidentifyTemplate) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *DeidentifyTemplate) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *DeidentifyTemplate) GetDeidentifyConfig() *DeidentifyConfig { + if m != nil { + return m.DeidentifyConfig + } + return nil +} + +// Details information about an error encountered during job execution or +// the results of an unsuccessful activation of the JobTrigger. +// Output only field. +type Error struct { + Details *google_rpc.Status `protobuf:"bytes,1,opt,name=details" json:"details,omitempty"` + // The times the error occurred. 
+ Timestamps []*google_protobuf1.Timestamp `protobuf:"bytes,2,rep,name=timestamps" json:"timestamps,omitempty"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{63} } + +func (m *Error) GetDetails() *google_rpc.Status { + if m != nil { + return m.Details + } + return nil +} + +func (m *Error) GetTimestamps() []*google_protobuf1.Timestamp { + if m != nil { + return m.Timestamps + } + return nil +} + +// Contains a configuration to make dlp api calls on a repeating basis. +type JobTrigger struct { + // Unique resource name for the triggeredJob, assigned by the service when the + // triggeredJob is created, for example + // `projects/dlp-test-project/triggeredJobs/53234423`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Display name (max 100 chars) + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // User provided description (max 256 chars) + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The configuration details for the specific type of job to run. + // + // Types that are valid to be assigned to Job: + // *JobTrigger_InspectJob + Job isJobTrigger_Job `protobuf_oneof:"job"` + // A list of triggers which will be OR'ed together. Only one in the list + // needs to trigger for a job to be started. The list may contain only + // a single Schedule trigger and must have at least one object. + Triggers []*JobTrigger_Trigger `protobuf:"bytes,5,rep,name=triggers" json:"triggers,omitempty"` + // A stream of errors encountered when the trigger was activated. Repeated + // errors may result in the JobTrigger automaticaly being paused. + // Will return the last 100 errors. Whenever the JobTrigger is modified + // this list will be cleared. Output only field. 
+ Errors []*Error `protobuf:"bytes,6,rep,name=errors" json:"errors,omitempty"` + // The creation timestamp of a triggeredJob, output only field. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // The last update timestamp of a triggeredJob, output only field. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,8,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // The timestamp of the last time this trigger executed, output only field. + LastRunTime *google_protobuf1.Timestamp `protobuf:"bytes,9,opt,name=last_run_time,json=lastRunTime" json:"last_run_time,omitempty"` + // A status for this trigger. [required] + Status JobTrigger_Status `protobuf:"varint,10,opt,name=status,enum=google.privacy.dlp.v2.JobTrigger_Status" json:"status,omitempty"` +} + +func (m *JobTrigger) Reset() { *m = JobTrigger{} } +func (m *JobTrigger) String() string { return proto.CompactTextString(m) } +func (*JobTrigger) ProtoMessage() {} +func (*JobTrigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } + +type isJobTrigger_Job interface { + isJobTrigger_Job() +} + +type JobTrigger_InspectJob struct { + InspectJob *InspectJobConfig `protobuf:"bytes,4,opt,name=inspect_job,json=inspectJob,oneof"` +} + +func (*JobTrigger_InspectJob) isJobTrigger_Job() {} + +func (m *JobTrigger) GetJob() isJobTrigger_Job { + if m != nil { + return m.Job + } + return nil +} + +func (m *JobTrigger) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *JobTrigger) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *JobTrigger) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *JobTrigger) GetInspectJob() *InspectJobConfig { + if x, ok := m.GetJob().(*JobTrigger_InspectJob); ok { + return x.InspectJob + } + return nil +} + +func (m *JobTrigger) GetTriggers() 
[]*JobTrigger_Trigger { + if m != nil { + return m.Triggers + } + return nil +} + +func (m *JobTrigger) GetErrors() []*Error { + if m != nil { + return m.Errors + } + return nil +} + +func (m *JobTrigger) GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *JobTrigger) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *JobTrigger) GetLastRunTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastRunTime + } + return nil +} + +func (m *JobTrigger) GetStatus() JobTrigger_Status { + if m != nil { + return m.Status + } + return JobTrigger_STATUS_UNSPECIFIED +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*JobTrigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _JobTrigger_OneofMarshaler, _JobTrigger_OneofUnmarshaler, _JobTrigger_OneofSizer, []interface{}{ + (*JobTrigger_InspectJob)(nil), + } +} + +func _JobTrigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*JobTrigger) + // job + switch x := m.Job.(type) { + case *JobTrigger_InspectJob: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InspectJob); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("JobTrigger.Job has unexpected type %T", x) + } + return nil +} + +func _JobTrigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*JobTrigger) + switch tag { + case 4: // job.inspect_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InspectJobConfig) + err := b.DecodeMessage(msg) + m.Job = &JobTrigger_InspectJob{msg} + return true, err + default: + return false, nil + } +} + +func _JobTrigger_OneofSizer(msg proto.Message) (n int) { + m := 
msg.(*JobTrigger) + // job + switch x := m.Job.(type) { + case *JobTrigger_InspectJob: + s := proto.Size(x.InspectJob) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// What event needs to occur for a new job to be started. +type JobTrigger_Trigger struct { + // Types that are valid to be assigned to Trigger: + // *JobTrigger_Trigger_Schedule + Trigger isJobTrigger_Trigger_Trigger `protobuf_oneof:"trigger"` +} + +func (m *JobTrigger_Trigger) Reset() { *m = JobTrigger_Trigger{} } +func (m *JobTrigger_Trigger) String() string { return proto.CompactTextString(m) } +func (*JobTrigger_Trigger) ProtoMessage() {} +func (*JobTrigger_Trigger) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64, 0} } + +type isJobTrigger_Trigger_Trigger interface { + isJobTrigger_Trigger_Trigger() +} + +type JobTrigger_Trigger_Schedule struct { + Schedule *Schedule `protobuf:"bytes,1,opt,name=schedule,oneof"` +} + +func (*JobTrigger_Trigger_Schedule) isJobTrigger_Trigger_Trigger() {} + +func (m *JobTrigger_Trigger) GetTrigger() isJobTrigger_Trigger_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func (m *JobTrigger_Trigger) GetSchedule() *Schedule { + if x, ok := m.GetTrigger().(*JobTrigger_Trigger_Schedule); ok { + return x.Schedule + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*JobTrigger_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _JobTrigger_Trigger_OneofMarshaler, _JobTrigger_Trigger_OneofUnmarshaler, _JobTrigger_Trigger_OneofSizer, []interface{}{ + (*JobTrigger_Trigger_Schedule)(nil), + } +} + +func _JobTrigger_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*JobTrigger_Trigger) + // trigger + switch x := m.Trigger.(type) { + case *JobTrigger_Trigger_Schedule: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schedule); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("JobTrigger_Trigger.Trigger has unexpected type %T", x) + } + return nil +} + +func _JobTrigger_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*JobTrigger_Trigger) + switch tag { + case 1: // trigger.schedule + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schedule) + err := b.DecodeMessage(msg) + m.Trigger = &JobTrigger_Trigger_Schedule{msg} + return true, err + default: + return false, nil + } +} + +func _JobTrigger_Trigger_OneofSizer(msg proto.Message) (n int) { + m := msg.(*JobTrigger_Trigger) + // trigger + switch x := m.Trigger.(type) { + case *JobTrigger_Trigger_Schedule: + s := proto.Size(x.Schedule) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A task to execute on the completion of a job. 
+type Action struct { + // Types that are valid to be assigned to Action: + // *Action_SaveFindings_ + // *Action_PubSub + Action isAction_Action `protobuf_oneof:"action"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} } + +type isAction_Action interface { + isAction_Action() +} + +type Action_SaveFindings_ struct { + SaveFindings *Action_SaveFindings `protobuf:"bytes,1,opt,name=save_findings,json=saveFindings,oneof"` +} +type Action_PubSub struct { + PubSub *Action_PublishToPubSub `protobuf:"bytes,2,opt,name=pub_sub,json=pubSub,oneof"` +} + +func (*Action_SaveFindings_) isAction_Action() {} +func (*Action_PubSub) isAction_Action() {} + +func (m *Action) GetAction() isAction_Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *Action) GetSaveFindings() *Action_SaveFindings { + if x, ok := m.GetAction().(*Action_SaveFindings_); ok { + return x.SaveFindings + } + return nil +} + +func (m *Action) GetPubSub() *Action_PublishToPubSub { + if x, ok := m.GetAction().(*Action_PubSub); ok { + return x.PubSub + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Action) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Action_OneofMarshaler, _Action_OneofUnmarshaler, _Action_OneofSizer, []interface{}{ + (*Action_SaveFindings_)(nil), + (*Action_PubSub)(nil), + } +} + +func _Action_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Action) + // action + switch x := m.Action.(type) { + case *Action_SaveFindings_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SaveFindings); err != nil { + return err + } + case *Action_PubSub: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PubSub); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Action.Action has unexpected type %T", x) + } + return nil +} + +func _Action_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Action) + switch tag { + case 1: // action.save_findings + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Action_SaveFindings) + err := b.DecodeMessage(msg) + m.Action = &Action_SaveFindings_{msg} + return true, err + case 2: // action.pub_sub + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Action_PublishToPubSub) + err := b.DecodeMessage(msg) + m.Action = &Action_PubSub{msg} + return true, err + default: + return false, nil + } +} + +func _Action_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Action) + // action + switch x := m.Action.(type) { + case *Action_SaveFindings_: + s := proto.Size(x.SaveFindings) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Action_PubSub: + s := proto.Size(x.PubSub) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: 
unexpected type %T in oneof", x)) + } + return n +} + +// If set, the detailed findings will be persisted to the specified +// OutputStorageConfig. Compatible with: Inspect +type Action_SaveFindings struct { + OutputConfig *OutputStorageConfig `protobuf:"bytes,1,opt,name=output_config,json=outputConfig" json:"output_config,omitempty"` +} + +func (m *Action_SaveFindings) Reset() { *m = Action_SaveFindings{} } +func (m *Action_SaveFindings) String() string { return proto.CompactTextString(m) } +func (*Action_SaveFindings) ProtoMessage() {} +func (*Action_SaveFindings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65, 0} } + +func (m *Action_SaveFindings) GetOutputConfig() *OutputStorageConfig { + if m != nil { + return m.OutputConfig + } + return nil +} + +// Publish the results of a DlpJob to a pub sub channel. +// Compatible with: Inpect, Risk +type Action_PublishToPubSub struct { + // Cloud Pub/Sub topic to send notifications to. The topic must have given + // publishing access rights to the DLP API service account executing + // the long running DlpJob sending the notifications. + // Format is projects/{project}/topics/{topic}. + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *Action_PublishToPubSub) Reset() { *m = Action_PublishToPubSub{} } +func (m *Action_PublishToPubSub) String() string { return proto.CompactTextString(m) } +func (*Action_PublishToPubSub) ProtoMessage() {} +func (*Action_PublishToPubSub) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65, 1} } + +func (m *Action_PublishToPubSub) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +// Request message for CreateInspectTemplate. +type CreateInspectTemplateRequest struct { + // The parent resource name, for example projects/my-project-id or + // organizations/my-org-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The InspectTemplate to create. 
+ InspectTemplate *InspectTemplate `protobuf:"bytes,2,opt,name=inspect_template,json=inspectTemplate" json:"inspect_template,omitempty"` + // The template id can contain uppercase and lowercase letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + TemplateId string `protobuf:"bytes,3,opt,name=template_id,json=templateId" json:"template_id,omitempty"` +} + +func (m *CreateInspectTemplateRequest) Reset() { *m = CreateInspectTemplateRequest{} } +func (m *CreateInspectTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInspectTemplateRequest) ProtoMessage() {} +func (*CreateInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} } + +func (m *CreateInspectTemplateRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateInspectTemplateRequest) GetInspectTemplate() *InspectTemplate { + if m != nil { + return m.InspectTemplate + } + return nil +} + +func (m *CreateInspectTemplateRequest) GetTemplateId() string { + if m != nil { + return m.TemplateId + } + return "" +} + +// Request message for UpdateInspectTemplate. +type UpdateInspectTemplateRequest struct { + // Resource name of organization and inspectTemplate to be updated, for + // example `organizations/433245324/inspectTemplates/432452342` or + // projects/project-id/inspectTemplates/432452342. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // New InspectTemplate value. + InspectTemplate *InspectTemplate `protobuf:"bytes,2,opt,name=inspect_template,json=inspectTemplate" json:"inspect_template,omitempty"` + // Mask to control which fields get updated. 
+ UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateInspectTemplateRequest) Reset() { *m = UpdateInspectTemplateRequest{} } +func (m *UpdateInspectTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateInspectTemplateRequest) ProtoMessage() {} +func (*UpdateInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} } + +func (m *UpdateInspectTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateInspectTemplateRequest) GetInspectTemplate() *InspectTemplate { + if m != nil { + return m.InspectTemplate + } + return nil +} + +func (m *UpdateInspectTemplateRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for GetInspectTemplate. +type GetInspectTemplateRequest struct { + // Resource name of the organization and inspectTemplate to be read, for + // example `organizations/433245324/inspectTemplates/432452342` or + // projects/project-id/inspectTemplates/432452342. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetInspectTemplateRequest) Reset() { *m = GetInspectTemplateRequest{} } +func (m *GetInspectTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*GetInspectTemplateRequest) ProtoMessage() {} +func (*GetInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } + +func (m *GetInspectTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for ListInspectTemplates. +type ListInspectTemplatesRequest struct { + // The parent resource name, for example projects/my-project-id or + // organizations/my-org-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional page token to continue retrieval. 
Comes from previous call + // to `ListInspectTemplates`. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional size of the page, can be limited by server. If zero server returns + // a page of max size 100. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListInspectTemplatesRequest) Reset() { *m = ListInspectTemplatesRequest{} } +func (m *ListInspectTemplatesRequest) String() string { return proto.CompactTextString(m) } +func (*ListInspectTemplatesRequest) ProtoMessage() {} +func (*ListInspectTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } + +func (m *ListInspectTemplatesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListInspectTemplatesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListInspectTemplatesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for ListInspectTemplates. +type ListInspectTemplatesResponse struct { + // List of inspectTemplates, up to page_size in ListInspectTemplatesRequest. + InspectTemplates []*InspectTemplate `protobuf:"bytes,1,rep,name=inspect_templates,json=inspectTemplates" json:"inspect_templates,omitempty"` + // If the next page is available then the next page token to be used + // in following ListInspectTemplates request. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListInspectTemplatesResponse) Reset() { *m = ListInspectTemplatesResponse{} } +func (m *ListInspectTemplatesResponse) String() string { return proto.CompactTextString(m) } +func (*ListInspectTemplatesResponse) ProtoMessage() {} +func (*ListInspectTemplatesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70} } + +func (m *ListInspectTemplatesResponse) GetInspectTemplates() []*InspectTemplate { + if m != nil { + return m.InspectTemplates + } + return nil +} + +func (m *ListInspectTemplatesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for DeleteInspectTemplate. +type DeleteInspectTemplateRequest struct { + // Resource name of the organization and inspectTemplate to be deleted, for + // example `organizations/433245324/inspectTemplates/432452342` or + // projects/project-id/inspectTemplates/432452342. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteInspectTemplateRequest) Reset() { *m = DeleteInspectTemplateRequest{} } +func (m *DeleteInspectTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteInspectTemplateRequest) ProtoMessage() {} +func (*DeleteInspectTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{71} } + +func (m *DeleteInspectTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for CreateJobTrigger. +type CreateJobTriggerRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The JobTrigger to create. 
+ JobTrigger *JobTrigger `protobuf:"bytes,2,opt,name=job_trigger,json=jobTrigger" json:"job_trigger,omitempty"` + // The trigger id can contain uppercase and lowercase letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + TriggerId string `protobuf:"bytes,3,opt,name=trigger_id,json=triggerId" json:"trigger_id,omitempty"` +} + +func (m *CreateJobTriggerRequest) Reset() { *m = CreateJobTriggerRequest{} } +func (m *CreateJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*CreateJobTriggerRequest) ProtoMessage() {} +func (*CreateJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{72} } + +func (m *CreateJobTriggerRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateJobTriggerRequest) GetJobTrigger() *JobTrigger { + if m != nil { + return m.JobTrigger + } + return nil +} + +func (m *CreateJobTriggerRequest) GetTriggerId() string { + if m != nil { + return m.TriggerId + } + return "" +} + +// Request message for UpdateJobTrigger. +type UpdateJobTriggerRequest struct { + // Resource name of the project and the triggeredJob, for example + // `projects/dlp-test-project/jobTriggers/53234423`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // New JobTrigger value. + JobTrigger *JobTrigger `protobuf:"bytes,2,opt,name=job_trigger,json=jobTrigger" json:"job_trigger,omitempty"` + // Mask to control which fields get updated. 
+ UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateJobTriggerRequest) Reset() { *m = UpdateJobTriggerRequest{} } +func (m *UpdateJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateJobTriggerRequest) ProtoMessage() {} +func (*UpdateJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{73} } + +func (m *UpdateJobTriggerRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateJobTriggerRequest) GetJobTrigger() *JobTrigger { + if m != nil { + return m.JobTrigger + } + return nil +} + +func (m *UpdateJobTriggerRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for GetJobTrigger. +type GetJobTriggerRequest struct { + // Resource name of the project and the triggeredJob, for example + // `projects/dlp-test-project/jobTriggers/53234423`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetJobTriggerRequest) Reset() { *m = GetJobTriggerRequest{} } +func (m *GetJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*GetJobTriggerRequest) ProtoMessage() {} +func (*GetJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{74} } + +func (m *GetJobTriggerRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for CreateDlpJobRequest. Used to initiate long running +// jobs such as calculating risk metrics or inspecting Google Cloud +// Storage. +type CreateDlpJobRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The configuration details for the specific type of job to run. 
+ // + // Types that are valid to be assigned to Job: + // *CreateDlpJobRequest_InspectJob + // *CreateDlpJobRequest_RiskJob + Job isCreateDlpJobRequest_Job `protobuf_oneof:"job"` + // The job id can contain uppercase and lowercase letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. + JobId string `protobuf:"bytes,4,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *CreateDlpJobRequest) Reset() { *m = CreateDlpJobRequest{} } +func (m *CreateDlpJobRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDlpJobRequest) ProtoMessage() {} +func (*CreateDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{75} } + +type isCreateDlpJobRequest_Job interface { + isCreateDlpJobRequest_Job() +} + +type CreateDlpJobRequest_InspectJob struct { + InspectJob *InspectJobConfig `protobuf:"bytes,2,opt,name=inspect_job,json=inspectJob,oneof"` +} +type CreateDlpJobRequest_RiskJob struct { + RiskJob *RiskAnalysisJobConfig `protobuf:"bytes,3,opt,name=risk_job,json=riskJob,oneof"` +} + +func (*CreateDlpJobRequest_InspectJob) isCreateDlpJobRequest_Job() {} +func (*CreateDlpJobRequest_RiskJob) isCreateDlpJobRequest_Job() {} + +func (m *CreateDlpJobRequest) GetJob() isCreateDlpJobRequest_Job { + if m != nil { + return m.Job + } + return nil +} + +func (m *CreateDlpJobRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDlpJobRequest) GetInspectJob() *InspectJobConfig { + if x, ok := m.GetJob().(*CreateDlpJobRequest_InspectJob); ok { + return x.InspectJob + } + return nil +} + +func (m *CreateDlpJobRequest) GetRiskJob() *RiskAnalysisJobConfig { + if x, ok := m.GetJob().(*CreateDlpJobRequest_RiskJob); ok { + return x.RiskJob + } + return nil +} + +func (m *CreateDlpJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return 
"" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CreateDlpJobRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CreateDlpJobRequest_OneofMarshaler, _CreateDlpJobRequest_OneofUnmarshaler, _CreateDlpJobRequest_OneofSizer, []interface{}{ + (*CreateDlpJobRequest_InspectJob)(nil), + (*CreateDlpJobRequest_RiskJob)(nil), + } +} + +func _CreateDlpJobRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CreateDlpJobRequest) + // job + switch x := m.Job.(type) { + case *CreateDlpJobRequest_InspectJob: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InspectJob); err != nil { + return err + } + case *CreateDlpJobRequest_RiskJob: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RiskJob); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CreateDlpJobRequest.Job has unexpected type %T", x) + } + return nil +} + +func _CreateDlpJobRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CreateDlpJobRequest) + switch tag { + case 2: // job.inspect_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InspectJobConfig) + err := b.DecodeMessage(msg) + m.Job = &CreateDlpJobRequest_InspectJob{msg} + return true, err + case 3: // job.risk_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(RiskAnalysisJobConfig) + err := b.DecodeMessage(msg) + m.Job = &CreateDlpJobRequest_RiskJob{msg} + return true, err + default: + return false, nil + } +} + +func _CreateDlpJobRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CreateDlpJobRequest) + // job + switch x := m.Job.(type) { + case *CreateDlpJobRequest_InspectJob: + s := proto.Size(x.InspectJob) + n += proto.SizeVarint(2<<3 
| proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *CreateDlpJobRequest_RiskJob: + s := proto.Size(x.RiskJob) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message for ListJobTriggers. +type ListJobTriggersRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional page token to continue retrieval. Comes from previous call + // to ListJobTriggers. `order_by` and `filter` should not change for + // subsequent calls, but can be omitted if token is specified. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional size of the page, can be limited by a server. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional comma separated list of triggeredJob fields to order by, + // followed by 'asc/desc' postfix, i.e. + // `"create_time asc,name desc,schedule_mode asc"`. This list is + // case-insensitive. + // + // Example: `"name asc,schedule_mode desc, status desc"` + // + // Supported filters keys and values are: + // + // - `create_time`: corresponds to time the triggeredJob was created. + // - `update_time`: corresponds to time the triggeredJob was last updated. + // - `name`: corresponds to JobTrigger's display name. + // - `status`: corresponds to the triggeredJob status. 
+ OrderBy string `protobuf:"bytes,4,opt,name=order_by,json=orderBy" json:"order_by,omitempty"` +} + +func (m *ListJobTriggersRequest) Reset() { *m = ListJobTriggersRequest{} } +func (m *ListJobTriggersRequest) String() string { return proto.CompactTextString(m) } +func (*ListJobTriggersRequest) ProtoMessage() {} +func (*ListJobTriggersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{76} } + +func (m *ListJobTriggersRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListJobTriggersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListJobTriggersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListJobTriggersRequest) GetOrderBy() string { + if m != nil { + return m.OrderBy + } + return "" +} + +// Response message for ListJobTriggers. +type ListJobTriggersResponse struct { + // List of triggeredJobs, up to page_size in ListJobTriggersRequest. + JobTriggers []*JobTrigger `protobuf:"bytes,1,rep,name=job_triggers,json=jobTriggers" json:"job_triggers,omitempty"` + // If the next page is available then the next page token to be used + // in following ListJobTriggers request. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListJobTriggersResponse) Reset() { *m = ListJobTriggersResponse{} } +func (m *ListJobTriggersResponse) String() string { return proto.CompactTextString(m) } +func (*ListJobTriggersResponse) ProtoMessage() {} +func (*ListJobTriggersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{77} } + +func (m *ListJobTriggersResponse) GetJobTriggers() []*JobTrigger { + if m != nil { + return m.JobTriggers + } + return nil +} + +func (m *ListJobTriggersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for DeleteJobTrigger. 
+type DeleteJobTriggerRequest struct { + // Resource name of the project and the triggeredJob, for example + // `projects/dlp-test-project/jobTriggers/53234423`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteJobTriggerRequest) Reset() { *m = DeleteJobTriggerRequest{} } +func (m *DeleteJobTriggerRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteJobTriggerRequest) ProtoMessage() {} +func (*DeleteJobTriggerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{78} } + +func (m *DeleteJobTriggerRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type InspectJobConfig struct { + // The data to scan. + StorageConfig *StorageConfig `protobuf:"bytes,1,opt,name=storage_config,json=storageConfig" json:"storage_config,omitempty"` + // How and what to scan for. + InspectConfig *InspectConfig `protobuf:"bytes,2,opt,name=inspect_config,json=inspectConfig" json:"inspect_config,omitempty"` + // If provided, will be used as the default for all values in InspectConfig. + // `inspect_config` will be merged into the values persisted as part of the + // template. + InspectTemplateName string `protobuf:"bytes,3,opt,name=inspect_template_name,json=inspectTemplateName" json:"inspect_template_name,omitempty"` + // Actions to execute at the completion of the job. Are executed in the order + // provided. 
+ Actions []*Action `protobuf:"bytes,4,rep,name=actions" json:"actions,omitempty"` +} + +func (m *InspectJobConfig) Reset() { *m = InspectJobConfig{} } +func (m *InspectJobConfig) String() string { return proto.CompactTextString(m) } +func (*InspectJobConfig) ProtoMessage() {} +func (*InspectJobConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{79} } + +func (m *InspectJobConfig) GetStorageConfig() *StorageConfig { + if m != nil { + return m.StorageConfig + } + return nil +} + +func (m *InspectJobConfig) GetInspectConfig() *InspectConfig { + if m != nil { + return m.InspectConfig + } + return nil +} + +func (m *InspectJobConfig) GetInspectTemplateName() string { + if m != nil { + return m.InspectTemplateName + } + return "" +} + +func (m *InspectJobConfig) GetActions() []*Action { + if m != nil { + return m.Actions + } + return nil +} + +// Combines all of the information about a DLP job. +type DlpJob struct { + // The server-assigned name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The type of job. + Type DlpJobType `protobuf:"varint,2,opt,name=type,enum=google.privacy.dlp.v2.DlpJobType" json:"type,omitempty"` + // State of a job. + State DlpJob_JobState `protobuf:"varint,3,opt,name=state,enum=google.privacy.dlp.v2.DlpJob_JobState" json:"state,omitempty"` + // Types that are valid to be assigned to Details: + // *DlpJob_RiskDetails + // *DlpJob_InspectDetails + Details isDlpJob_Details `protobuf_oneof:"details"` + // Time when the job was created. + CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Time when the job started. + StartTime *google_protobuf1.Timestamp `protobuf:"bytes,7,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Time when the job finished. 
+ EndTime *google_protobuf1.Timestamp `protobuf:"bytes,8,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // If created by a job trigger, the resource name of the trigger that + // instantiated the job. + JobTriggerName string `protobuf:"bytes,10,opt,name=job_trigger_name,json=jobTriggerName" json:"job_trigger_name,omitempty"` + // A stream of errors encountered running the job. + Errors []*Error `protobuf:"bytes,11,rep,name=errors" json:"errors,omitempty"` +} + +func (m *DlpJob) Reset() { *m = DlpJob{} } +func (m *DlpJob) String() string { return proto.CompactTextString(m) } +func (*DlpJob) ProtoMessage() {} +func (*DlpJob) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{80} } + +type isDlpJob_Details interface { + isDlpJob_Details() +} + +type DlpJob_RiskDetails struct { + RiskDetails *AnalyzeDataSourceRiskDetails `protobuf:"bytes,4,opt,name=risk_details,json=riskDetails,oneof"` +} +type DlpJob_InspectDetails struct { + InspectDetails *InspectDataSourceDetails `protobuf:"bytes,5,opt,name=inspect_details,json=inspectDetails,oneof"` +} + +func (*DlpJob_RiskDetails) isDlpJob_Details() {} +func (*DlpJob_InspectDetails) isDlpJob_Details() {} + +func (m *DlpJob) GetDetails() isDlpJob_Details { + if m != nil { + return m.Details + } + return nil +} + +func (m *DlpJob) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DlpJob) GetType() DlpJobType { + if m != nil { + return m.Type + } + return DlpJobType_DLP_JOB_TYPE_UNSPECIFIED +} + +func (m *DlpJob) GetState() DlpJob_JobState { + if m != nil { + return m.State + } + return DlpJob_JOB_STATE_UNSPECIFIED +} + +func (m *DlpJob) GetRiskDetails() *AnalyzeDataSourceRiskDetails { + if x, ok := m.GetDetails().(*DlpJob_RiskDetails); ok { + return x.RiskDetails + } + return nil +} + +func (m *DlpJob) GetInspectDetails() *InspectDataSourceDetails { + if x, ok := m.GetDetails().(*DlpJob_InspectDetails); ok { + return x.InspectDetails + } + return nil +} + +func (m *DlpJob) 
GetCreateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *DlpJob) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *DlpJob) GetEndTime() *google_protobuf1.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *DlpJob) GetJobTriggerName() string { + if m != nil { + return m.JobTriggerName + } + return "" +} + +func (m *DlpJob) GetErrors() []*Error { + if m != nil { + return m.Errors + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*DlpJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DlpJob_OneofMarshaler, _DlpJob_OneofUnmarshaler, _DlpJob_OneofSizer, []interface{}{ + (*DlpJob_RiskDetails)(nil), + (*DlpJob_InspectDetails)(nil), + } +} + +func _DlpJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DlpJob) + // details + switch x := m.Details.(type) { + case *DlpJob_RiskDetails: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RiskDetails); err != nil { + return err + } + case *DlpJob_InspectDetails: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InspectDetails); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DlpJob.Details has unexpected type %T", x) + } + return nil +} + +func _DlpJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DlpJob) + switch tag { + case 4: // details.risk_details + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AnalyzeDataSourceRiskDetails) + err := b.DecodeMessage(msg) + m.Details = &DlpJob_RiskDetails{msg} + return true, err + case 5: // details.inspect_details + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(InspectDataSourceDetails) + err := b.DecodeMessage(msg) + m.Details = &DlpJob_InspectDetails{msg} + return true, err + default: + return false, nil + } +} + +func _DlpJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DlpJob) + // details + switch x := m.Details.(type) { + case *DlpJob_RiskDetails: + s := proto.Size(x.RiskDetails) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *DlpJob_InspectDetails: + s := proto.Size(x.InspectDetails) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The request message for [DlpJobs.GetDlpJob][]. +type GetDlpJobRequest struct { + // The name of the DlpJob resource. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetDlpJobRequest) Reset() { *m = GetDlpJobRequest{} } +func (m *GetDlpJobRequest) String() string { return proto.CompactTextString(m) } +func (*GetDlpJobRequest) ProtoMessage() {} +func (*GetDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{81} } + +func (m *GetDlpJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request message for listing DLP jobs. +type ListDlpJobsRequest struct { + // The parent resource name, for example projects/my-project-id. + Parent string `protobuf:"bytes,4,opt,name=parent" json:"parent,omitempty"` + // Optional. Allows filtering. + // + // Supported syntax: + // + // * Filter expressions are made up of one or more restrictions. + // * Restrictions can be combined by `AND` or `OR` logical operators. A + // sequence of restrictions implicitly uses `AND`. + // * A restriction has the form of ` `. 
+ // * Supported fields/values for inspect jobs: + // - `state` - PENDING|RUNNING|CANCELED|FINISHED|FAILED + // - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY + // - `trigger_name` - The resource name of the trigger that created job. + // * Supported fields for risk analysis jobs: + // - `state` - RUNNING|CANCELED|FINISHED|FAILED + // * The operator must be `=` or `!=`. + // + // Examples: + // + // * inspected_storage = cloud_storage AND state = done + // * inspected_storage = cloud_storage OR inspected_storage = bigquery + // * inspected_storage = cloud_storage AND (state = done OR state = canceled) + // + // The length of this field should be no more than 500 characters. + Filter string `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + // The standard list page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The standard list page token. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // The type of job. 
Defaults to `DlpJobType.INSPECT` + Type DlpJobType `protobuf:"varint,5,opt,name=type,enum=google.privacy.dlp.v2.DlpJobType" json:"type,omitempty"` +} + +func (m *ListDlpJobsRequest) Reset() { *m = ListDlpJobsRequest{} } +func (m *ListDlpJobsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDlpJobsRequest) ProtoMessage() {} +func (*ListDlpJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{82} } + +func (m *ListDlpJobsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDlpJobsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListDlpJobsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDlpJobsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListDlpJobsRequest) GetType() DlpJobType { + if m != nil { + return m.Type + } + return DlpJobType_DLP_JOB_TYPE_UNSPECIFIED +} + +// The response message for listing DLP jobs. +type ListDlpJobsResponse struct { + // A list of DlpJobs that matches the specified filter in the request. + Jobs []*DlpJob `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` + // The standard List next-page token. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDlpJobsResponse) Reset() { *m = ListDlpJobsResponse{} } +func (m *ListDlpJobsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDlpJobsResponse) ProtoMessage() {} +func (*ListDlpJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{83} } + +func (m *ListDlpJobsResponse) GetJobs() []*DlpJob { + if m != nil { + return m.Jobs + } + return nil +} + +func (m *ListDlpJobsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The request message for canceling a DLP job. 
+type CancelDlpJobRequest struct { + // The name of the DlpJob resource to be cancelled. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *CancelDlpJobRequest) Reset() { *m = CancelDlpJobRequest{} } +func (m *CancelDlpJobRequest) String() string { return proto.CompactTextString(m) } +func (*CancelDlpJobRequest) ProtoMessage() {} +func (*CancelDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{84} } + +func (m *CancelDlpJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The request message for deleting a DLP job. +type DeleteDlpJobRequest struct { + // The name of the DlpJob resource to be deleted. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteDlpJobRequest) Reset() { *m = DeleteDlpJobRequest{} } +func (m *DeleteDlpJobRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDlpJobRequest) ProtoMessage() {} +func (*DeleteDlpJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{85} } + +func (m *DeleteDlpJobRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for CreateDeidentifyTemplate. +type CreateDeidentifyTemplateRequest struct { + // The parent resource name, for example projects/my-project-id or + // organizations/my-org-id. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The DeidentifyTemplate to create. + DeidentifyTemplate *DeidentifyTemplate `protobuf:"bytes,2,opt,name=deidentify_template,json=deidentifyTemplate" json:"deidentify_template,omitempty"` + // The template id can contain uppercase and lowercase letters, + // numbers, and hyphens; that is, it must match the regular + // expression: `[a-zA-Z\\d-]+`. The maximum length is 100 + // characters. Can be empty to allow the system to generate one. 
+ TemplateId string `protobuf:"bytes,3,opt,name=template_id,json=templateId" json:"template_id,omitempty"` +} + +func (m *CreateDeidentifyTemplateRequest) Reset() { *m = CreateDeidentifyTemplateRequest{} } +func (m *CreateDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDeidentifyTemplateRequest) ProtoMessage() {} +func (*CreateDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{86} +} + +func (m *CreateDeidentifyTemplateRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDeidentifyTemplateRequest) GetDeidentifyTemplate() *DeidentifyTemplate { + if m != nil { + return m.DeidentifyTemplate + } + return nil +} + +func (m *CreateDeidentifyTemplateRequest) GetTemplateId() string { + if m != nil { + return m.TemplateId + } + return "" +} + +// Request message for UpdateDeidentifyTemplate. +type UpdateDeidentifyTemplateRequest struct { + // Resource name of organization and deidentify template to be updated, for + // example `organizations/433245324/deidentifyTemplates/432452342` or + // projects/project-id/deidentifyTemplates/432452342. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // New DeidentifyTemplate value. + DeidentifyTemplate *DeidentifyTemplate `protobuf:"bytes,2,opt,name=deidentify_template,json=deidentifyTemplate" json:"deidentify_template,omitempty"` + // Mask to control which fields get updated. 
+ UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateDeidentifyTemplateRequest) Reset() { *m = UpdateDeidentifyTemplateRequest{} } +func (m *UpdateDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDeidentifyTemplateRequest) ProtoMessage() {} +func (*UpdateDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{87} +} + +func (m *UpdateDeidentifyTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateDeidentifyTemplateRequest) GetDeidentifyTemplate() *DeidentifyTemplate { + if m != nil { + return m.DeidentifyTemplate + } + return nil +} + +func (m *UpdateDeidentifyTemplateRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request message for GetDeidentifyTemplate. +type GetDeidentifyTemplateRequest struct { + // Resource name of the organization and deidentify template to be read, for + // example `organizations/433245324/deidentifyTemplates/432452342` or + // projects/project-id/deidentifyTemplates/432452342. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetDeidentifyTemplateRequest) Reset() { *m = GetDeidentifyTemplateRequest{} } +func (m *GetDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*GetDeidentifyTemplateRequest) ProtoMessage() {} +func (*GetDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{88} } + +func (m *GetDeidentifyTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request message for ListDeidentifyTemplates. +type ListDeidentifyTemplatesRequest struct { + // The parent resource name, for example projects/my-project-id or + // organizations/my-org-id. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional page token to continue retrieval. Comes from previous call + // to `ListDeidentifyTemplates`. + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional size of the page, can be limited by server. If zero server returns + // a page of max size 100. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` +} + +func (m *ListDeidentifyTemplatesRequest) Reset() { *m = ListDeidentifyTemplatesRequest{} } +func (m *ListDeidentifyTemplatesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDeidentifyTemplatesRequest) ProtoMessage() {} +func (*ListDeidentifyTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{89} } + +func (m *ListDeidentifyTemplatesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDeidentifyTemplatesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListDeidentifyTemplatesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +// Response message for ListDeidentifyTemplates. +type ListDeidentifyTemplatesResponse struct { + // List of deidentify templates, up to page_size in + // ListDeidentifyTemplatesRequest. + DeidentifyTemplates []*DeidentifyTemplate `protobuf:"bytes,1,rep,name=deidentify_templates,json=deidentifyTemplates" json:"deidentify_templates,omitempty"` + // If the next page is available then the next page token to be used + // in following ListDeidentifyTemplates request. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDeidentifyTemplatesResponse) Reset() { *m = ListDeidentifyTemplatesResponse{} } +func (m *ListDeidentifyTemplatesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDeidentifyTemplatesResponse) ProtoMessage() {} +func (*ListDeidentifyTemplatesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{90} +} + +func (m *ListDeidentifyTemplatesResponse) GetDeidentifyTemplates() []*DeidentifyTemplate { + if m != nil { + return m.DeidentifyTemplates + } + return nil +} + +func (m *ListDeidentifyTemplatesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request message for DeleteDeidentifyTemplate. +type DeleteDeidentifyTemplateRequest struct { + // Resource name of the organization and deidentify template to be deleted, + // for example `organizations/433245324/deidentifyTemplates/432452342` or + // projects/project-id/deidentifyTemplates/432452342. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteDeidentifyTemplateRequest) Reset() { *m = DeleteDeidentifyTemplateRequest{} } +func (m *DeleteDeidentifyTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDeidentifyTemplateRequest) ProtoMessage() {} +func (*DeleteDeidentifyTemplateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{91} +} + +func (m *DeleteDeidentifyTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterType((*InspectConfig)(nil), "google.privacy.dlp.v2.InspectConfig") + proto.RegisterType((*InspectConfig_FindingLimits)(nil), "google.privacy.dlp.v2.InspectConfig.FindingLimits") + proto.RegisterType((*InspectConfig_FindingLimits_InfoTypeLimit)(nil), "google.privacy.dlp.v2.InspectConfig.FindingLimits.InfoTypeLimit") + proto.RegisterType((*ByteContentItem)(nil), "google.privacy.dlp.v2.ByteContentItem") + proto.RegisterType((*ContentItem)(nil), "google.privacy.dlp.v2.ContentItem") + proto.RegisterType((*Table)(nil), "google.privacy.dlp.v2.Table") + proto.RegisterType((*Table_Row)(nil), "google.privacy.dlp.v2.Table.Row") + proto.RegisterType((*InspectResult)(nil), "google.privacy.dlp.v2.InspectResult") + proto.RegisterType((*Finding)(nil), "google.privacy.dlp.v2.Finding") + proto.RegisterType((*Location)(nil), "google.privacy.dlp.v2.Location") + proto.RegisterType((*ContentLocation)(nil), "google.privacy.dlp.v2.ContentLocation") + proto.RegisterType((*DocumentLocation)(nil), "google.privacy.dlp.v2.DocumentLocation") + proto.RegisterType((*RecordLocation)(nil), "google.privacy.dlp.v2.RecordLocation") + proto.RegisterType((*TableLocation)(nil), "google.privacy.dlp.v2.TableLocation") + proto.RegisterType((*Range)(nil), "google.privacy.dlp.v2.Range") + proto.RegisterType((*ImageLocation)(nil), "google.privacy.dlp.v2.ImageLocation") + proto.RegisterType((*BoundingBox)(nil), 
"google.privacy.dlp.v2.BoundingBox") + proto.RegisterType((*RedactImageRequest)(nil), "google.privacy.dlp.v2.RedactImageRequest") + proto.RegisterType((*RedactImageRequest_ImageRedactionConfig)(nil), "google.privacy.dlp.v2.RedactImageRequest.ImageRedactionConfig") + proto.RegisterType((*Color)(nil), "google.privacy.dlp.v2.Color") + proto.RegisterType((*RedactImageResponse)(nil), "google.privacy.dlp.v2.RedactImageResponse") + proto.RegisterType((*DeidentifyContentRequest)(nil), "google.privacy.dlp.v2.DeidentifyContentRequest") + proto.RegisterType((*DeidentifyContentResponse)(nil), "google.privacy.dlp.v2.DeidentifyContentResponse") + proto.RegisterType((*ReidentifyContentRequest)(nil), "google.privacy.dlp.v2.ReidentifyContentRequest") + proto.RegisterType((*ReidentifyContentResponse)(nil), "google.privacy.dlp.v2.ReidentifyContentResponse") + proto.RegisterType((*InspectContentRequest)(nil), "google.privacy.dlp.v2.InspectContentRequest") + proto.RegisterType((*InspectContentResponse)(nil), "google.privacy.dlp.v2.InspectContentResponse") + proto.RegisterType((*OutputStorageConfig)(nil), "google.privacy.dlp.v2.OutputStorageConfig") + proto.RegisterType((*InfoTypeStats)(nil), "google.privacy.dlp.v2.InfoTypeStats") + proto.RegisterType((*InspectDataSourceDetails)(nil), "google.privacy.dlp.v2.InspectDataSourceDetails") + proto.RegisterType((*InspectDataSourceDetails_RequestedOptions)(nil), "google.privacy.dlp.v2.InspectDataSourceDetails.RequestedOptions") + proto.RegisterType((*InspectDataSourceDetails_Result)(nil), "google.privacy.dlp.v2.InspectDataSourceDetails.Result") + proto.RegisterType((*InfoTypeDescription)(nil), "google.privacy.dlp.v2.InfoTypeDescription") + proto.RegisterType((*ListInfoTypesRequest)(nil), "google.privacy.dlp.v2.ListInfoTypesRequest") + proto.RegisterType((*ListInfoTypesResponse)(nil), "google.privacy.dlp.v2.ListInfoTypesResponse") + proto.RegisterType((*RiskAnalysisJobConfig)(nil), "google.privacy.dlp.v2.RiskAnalysisJobConfig") + 
proto.RegisterType((*PrivacyMetric)(nil), "google.privacy.dlp.v2.PrivacyMetric") + proto.RegisterType((*PrivacyMetric_NumericalStatsConfig)(nil), "google.privacy.dlp.v2.PrivacyMetric.NumericalStatsConfig") + proto.RegisterType((*PrivacyMetric_CategoricalStatsConfig)(nil), "google.privacy.dlp.v2.PrivacyMetric.CategoricalStatsConfig") + proto.RegisterType((*PrivacyMetric_KAnonymityConfig)(nil), "google.privacy.dlp.v2.PrivacyMetric.KAnonymityConfig") + proto.RegisterType((*PrivacyMetric_LDiversityConfig)(nil), "google.privacy.dlp.v2.PrivacyMetric.LDiversityConfig") + proto.RegisterType((*PrivacyMetric_KMapEstimationConfig)(nil), "google.privacy.dlp.v2.PrivacyMetric.KMapEstimationConfig") + proto.RegisterType((*PrivacyMetric_KMapEstimationConfig_TaggedField)(nil), "google.privacy.dlp.v2.PrivacyMetric.KMapEstimationConfig.TaggedField") + proto.RegisterType((*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable)(nil), "google.privacy.dlp.v2.PrivacyMetric.KMapEstimationConfig.AuxiliaryTable") + proto.RegisterType((*PrivacyMetric_KMapEstimationConfig_AuxiliaryTable_QuasiIdField)(nil), "google.privacy.dlp.v2.PrivacyMetric.KMapEstimationConfig.AuxiliaryTable.QuasiIdField") + proto.RegisterType((*AnalyzeDataSourceRiskDetails)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_NumericalStatsResult)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.NumericalStatsResult") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_CategoricalStatsResult)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.CategoricalStatsResult") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_CategoricalStatsResult_CategoricalStatsHistogramBucket)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.CategoricalStatsResult.CategoricalStatsHistogramBucket") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_KAnonymityResult)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KAnonymityResult") + 
proto.RegisterType((*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityEquivalenceClass)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KAnonymityResult.KAnonymityEquivalenceClass") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_KAnonymityResult_KAnonymityHistogramBucket)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KAnonymityResult.KAnonymityHistogramBucket") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_LDiversityResult)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.LDiversityResult") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityEquivalenceClass)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.LDiversityResult.LDiversityEquivalenceClass") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_LDiversityResult_LDiversityHistogramBucket)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.LDiversityResult.LDiversityHistogramBucket") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_KMapEstimationResult)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KMapEstimationResult") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationQuasiIdValues)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KMapEstimationResult.KMapEstimationQuasiIdValues") + proto.RegisterType((*AnalyzeDataSourceRiskDetails_KMapEstimationResult_KMapEstimationHistogramBucket)(nil), "google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KMapEstimationResult.KMapEstimationHistogramBucket") + proto.RegisterType((*ValueFrequency)(nil), "google.privacy.dlp.v2.ValueFrequency") + proto.RegisterType((*Value)(nil), "google.privacy.dlp.v2.Value") + proto.RegisterType((*QuoteInfo)(nil), "google.privacy.dlp.v2.QuoteInfo") + proto.RegisterType((*DateTime)(nil), "google.privacy.dlp.v2.DateTime") + proto.RegisterType((*DateTime_TimeZone)(nil), "google.privacy.dlp.v2.DateTime.TimeZone") + proto.RegisterType((*DeidentifyConfig)(nil), 
"google.privacy.dlp.v2.DeidentifyConfig") + proto.RegisterType((*PrimitiveTransformation)(nil), "google.privacy.dlp.v2.PrimitiveTransformation") + proto.RegisterType((*TimePartConfig)(nil), "google.privacy.dlp.v2.TimePartConfig") + proto.RegisterType((*CryptoHashConfig)(nil), "google.privacy.dlp.v2.CryptoHashConfig") + proto.RegisterType((*ReplaceValueConfig)(nil), "google.privacy.dlp.v2.ReplaceValueConfig") + proto.RegisterType((*ReplaceWithInfoTypeConfig)(nil), "google.privacy.dlp.v2.ReplaceWithInfoTypeConfig") + proto.RegisterType((*RedactConfig)(nil), "google.privacy.dlp.v2.RedactConfig") + proto.RegisterType((*CharsToIgnore)(nil), "google.privacy.dlp.v2.CharsToIgnore") + proto.RegisterType((*CharacterMaskConfig)(nil), "google.privacy.dlp.v2.CharacterMaskConfig") + proto.RegisterType((*FixedSizeBucketingConfig)(nil), "google.privacy.dlp.v2.FixedSizeBucketingConfig") + proto.RegisterType((*BucketingConfig)(nil), "google.privacy.dlp.v2.BucketingConfig") + proto.RegisterType((*BucketingConfig_Bucket)(nil), "google.privacy.dlp.v2.BucketingConfig.Bucket") + proto.RegisterType((*CryptoReplaceFfxFpeConfig)(nil), "google.privacy.dlp.v2.CryptoReplaceFfxFpeConfig") + proto.RegisterType((*CryptoKey)(nil), "google.privacy.dlp.v2.CryptoKey") + proto.RegisterType((*TransientCryptoKey)(nil), "google.privacy.dlp.v2.TransientCryptoKey") + proto.RegisterType((*UnwrappedCryptoKey)(nil), "google.privacy.dlp.v2.UnwrappedCryptoKey") + proto.RegisterType((*KmsWrappedCryptoKey)(nil), "google.privacy.dlp.v2.KmsWrappedCryptoKey") + proto.RegisterType((*DateShiftConfig)(nil), "google.privacy.dlp.v2.DateShiftConfig") + proto.RegisterType((*InfoTypeTransformations)(nil), "google.privacy.dlp.v2.InfoTypeTransformations") + proto.RegisterType((*InfoTypeTransformations_InfoTypeTransformation)(nil), "google.privacy.dlp.v2.InfoTypeTransformations.InfoTypeTransformation") + proto.RegisterType((*FieldTransformation)(nil), "google.privacy.dlp.v2.FieldTransformation") + 
proto.RegisterType((*RecordTransformations)(nil), "google.privacy.dlp.v2.RecordTransformations") + proto.RegisterType((*RecordSuppression)(nil), "google.privacy.dlp.v2.RecordSuppression") + proto.RegisterType((*RecordCondition)(nil), "google.privacy.dlp.v2.RecordCondition") + proto.RegisterType((*RecordCondition_Condition)(nil), "google.privacy.dlp.v2.RecordCondition.Condition") + proto.RegisterType((*RecordCondition_Conditions)(nil), "google.privacy.dlp.v2.RecordCondition.Conditions") + proto.RegisterType((*RecordCondition_Expressions)(nil), "google.privacy.dlp.v2.RecordCondition.Expressions") + proto.RegisterType((*TransformationOverview)(nil), "google.privacy.dlp.v2.TransformationOverview") + proto.RegisterType((*TransformationSummary)(nil), "google.privacy.dlp.v2.TransformationSummary") + proto.RegisterType((*TransformationSummary_SummaryResult)(nil), "google.privacy.dlp.v2.TransformationSummary.SummaryResult") + proto.RegisterType((*Schedule)(nil), "google.privacy.dlp.v2.Schedule") + proto.RegisterType((*InspectTemplate)(nil), "google.privacy.dlp.v2.InspectTemplate") + proto.RegisterType((*DeidentifyTemplate)(nil), "google.privacy.dlp.v2.DeidentifyTemplate") + proto.RegisterType((*Error)(nil), "google.privacy.dlp.v2.Error") + proto.RegisterType((*JobTrigger)(nil), "google.privacy.dlp.v2.JobTrigger") + proto.RegisterType((*JobTrigger_Trigger)(nil), "google.privacy.dlp.v2.JobTrigger.Trigger") + proto.RegisterType((*Action)(nil), "google.privacy.dlp.v2.Action") + proto.RegisterType((*Action_SaveFindings)(nil), "google.privacy.dlp.v2.Action.SaveFindings") + proto.RegisterType((*Action_PublishToPubSub)(nil), "google.privacy.dlp.v2.Action.PublishToPubSub") + proto.RegisterType((*CreateInspectTemplateRequest)(nil), "google.privacy.dlp.v2.CreateInspectTemplateRequest") + proto.RegisterType((*UpdateInspectTemplateRequest)(nil), "google.privacy.dlp.v2.UpdateInspectTemplateRequest") + proto.RegisterType((*GetInspectTemplateRequest)(nil), 
"google.privacy.dlp.v2.GetInspectTemplateRequest") + proto.RegisterType((*ListInspectTemplatesRequest)(nil), "google.privacy.dlp.v2.ListInspectTemplatesRequest") + proto.RegisterType((*ListInspectTemplatesResponse)(nil), "google.privacy.dlp.v2.ListInspectTemplatesResponse") + proto.RegisterType((*DeleteInspectTemplateRequest)(nil), "google.privacy.dlp.v2.DeleteInspectTemplateRequest") + proto.RegisterType((*CreateJobTriggerRequest)(nil), "google.privacy.dlp.v2.CreateJobTriggerRequest") + proto.RegisterType((*UpdateJobTriggerRequest)(nil), "google.privacy.dlp.v2.UpdateJobTriggerRequest") + proto.RegisterType((*GetJobTriggerRequest)(nil), "google.privacy.dlp.v2.GetJobTriggerRequest") + proto.RegisterType((*CreateDlpJobRequest)(nil), "google.privacy.dlp.v2.CreateDlpJobRequest") + proto.RegisterType((*ListJobTriggersRequest)(nil), "google.privacy.dlp.v2.ListJobTriggersRequest") + proto.RegisterType((*ListJobTriggersResponse)(nil), "google.privacy.dlp.v2.ListJobTriggersResponse") + proto.RegisterType((*DeleteJobTriggerRequest)(nil), "google.privacy.dlp.v2.DeleteJobTriggerRequest") + proto.RegisterType((*InspectJobConfig)(nil), "google.privacy.dlp.v2.InspectJobConfig") + proto.RegisterType((*DlpJob)(nil), "google.privacy.dlp.v2.DlpJob") + proto.RegisterType((*GetDlpJobRequest)(nil), "google.privacy.dlp.v2.GetDlpJobRequest") + proto.RegisterType((*ListDlpJobsRequest)(nil), "google.privacy.dlp.v2.ListDlpJobsRequest") + proto.RegisterType((*ListDlpJobsResponse)(nil), "google.privacy.dlp.v2.ListDlpJobsResponse") + proto.RegisterType((*CancelDlpJobRequest)(nil), "google.privacy.dlp.v2.CancelDlpJobRequest") + proto.RegisterType((*DeleteDlpJobRequest)(nil), "google.privacy.dlp.v2.DeleteDlpJobRequest") + proto.RegisterType((*CreateDeidentifyTemplateRequest)(nil), "google.privacy.dlp.v2.CreateDeidentifyTemplateRequest") + proto.RegisterType((*UpdateDeidentifyTemplateRequest)(nil), "google.privacy.dlp.v2.UpdateDeidentifyTemplateRequest") + 
proto.RegisterType((*GetDeidentifyTemplateRequest)(nil), "google.privacy.dlp.v2.GetDeidentifyTemplateRequest") + proto.RegisterType((*ListDeidentifyTemplatesRequest)(nil), "google.privacy.dlp.v2.ListDeidentifyTemplatesRequest") + proto.RegisterType((*ListDeidentifyTemplatesResponse)(nil), "google.privacy.dlp.v2.ListDeidentifyTemplatesResponse") + proto.RegisterType((*DeleteDeidentifyTemplateRequest)(nil), "google.privacy.dlp.v2.DeleteDeidentifyTemplateRequest") + proto.RegisterEnum("google.privacy.dlp.v2.ContentOption", ContentOption_name, ContentOption_value) + proto.RegisterEnum("google.privacy.dlp.v2.InfoTypeSupportedBy", InfoTypeSupportedBy_name, InfoTypeSupportedBy_value) + proto.RegisterEnum("google.privacy.dlp.v2.RelationalOperator", RelationalOperator_name, RelationalOperator_value) + proto.RegisterEnum("google.privacy.dlp.v2.DlpJobType", DlpJobType_name, DlpJobType_value) + proto.RegisterEnum("google.privacy.dlp.v2.ByteContentItem_BytesType", ByteContentItem_BytesType_name, ByteContentItem_BytesType_value) + proto.RegisterEnum("google.privacy.dlp.v2.OutputStorageConfig_OutputSchema", OutputStorageConfig_OutputSchema_name, OutputStorageConfig_OutputSchema_value) + proto.RegisterEnum("google.privacy.dlp.v2.TimePartConfig_TimePart", TimePartConfig_TimePart_name, TimePartConfig_TimePart_value) + proto.RegisterEnum("google.privacy.dlp.v2.CharsToIgnore_CommonCharsToIgnore", CharsToIgnore_CommonCharsToIgnore_name, CharsToIgnore_CommonCharsToIgnore_value) + proto.RegisterEnum("google.privacy.dlp.v2.CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet", CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_name, CryptoReplaceFfxFpeConfig_FfxCommonNativeAlphabet_value) + proto.RegisterEnum("google.privacy.dlp.v2.RecordCondition_Expressions_LogicalOperator", RecordCondition_Expressions_LogicalOperator_name, RecordCondition_Expressions_LogicalOperator_value) + proto.RegisterEnum("google.privacy.dlp.v2.TransformationSummary_TransformationResultCode", 
TransformationSummary_TransformationResultCode_name, TransformationSummary_TransformationResultCode_value) + proto.RegisterEnum("google.privacy.dlp.v2.JobTrigger_Status", JobTrigger_Status_name, JobTrigger_Status_value) + proto.RegisterEnum("google.privacy.dlp.v2.DlpJob_JobState", DlpJob_JobState_name, DlpJob_JobState_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DlpService service + +type DlpServiceClient interface { + // Finds potentially sensitive info in content. + // This method has limits on input size, processing time, and output size. + // [How-to guide for text](/dlp/docs/inspecting-text), [How-to guide for + // images](/dlp/docs/inspecting-images) + InspectContent(ctx context.Context, in *InspectContentRequest, opts ...grpc.CallOption) (*InspectContentResponse, error) + // Redacts potentially sensitive info from an image. + // This method has limits on input size, processing time, and output size. + // [How-to guide](/dlp/docs/redacting-sensitive-data-images) + RedactImage(ctx context.Context, in *RedactImageRequest, opts ...grpc.CallOption) (*RedactImageResponse, error) + // De-identifies potentially sensitive info from a ContentItem. + // This method has limits on input size and output size. + // [How-to guide](/dlp/docs/deidentify-sensitive-data) + DeidentifyContent(ctx context.Context, in *DeidentifyContentRequest, opts ...grpc.CallOption) (*DeidentifyContentResponse, error) + // Re-identify content that has been de-identified. + ReidentifyContent(ctx context.Context, in *ReidentifyContentRequest, opts ...grpc.CallOption) (*ReidentifyContentResponse, error) + // Returns sensitive information types DLP supports. 
+ ListInfoTypes(ctx context.Context, in *ListInfoTypesRequest, opts ...grpc.CallOption) (*ListInfoTypesResponse, error) + // Creates an inspect template for re-using frequently used configuration + // for inspecting content, images, and storage. + CreateInspectTemplate(ctx context.Context, in *CreateInspectTemplateRequest, opts ...grpc.CallOption) (*InspectTemplate, error) + // Updates the inspect template. + UpdateInspectTemplate(ctx context.Context, in *UpdateInspectTemplateRequest, opts ...grpc.CallOption) (*InspectTemplate, error) + // Gets an inspect template. + GetInspectTemplate(ctx context.Context, in *GetInspectTemplateRequest, opts ...grpc.CallOption) (*InspectTemplate, error) + // Lists inspect templates. + ListInspectTemplates(ctx context.Context, in *ListInspectTemplatesRequest, opts ...grpc.CallOption) (*ListInspectTemplatesResponse, error) + // Deletes inspect templates. + DeleteInspectTemplate(ctx context.Context, in *DeleteInspectTemplateRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Creates an Deidentify template for re-using frequently used configuration + // for Deidentifying content, images, and storage. + CreateDeidentifyTemplate(ctx context.Context, in *CreateDeidentifyTemplateRequest, opts ...grpc.CallOption) (*DeidentifyTemplate, error) + // Updates the inspect template. + UpdateDeidentifyTemplate(ctx context.Context, in *UpdateDeidentifyTemplateRequest, opts ...grpc.CallOption) (*DeidentifyTemplate, error) + // Gets an inspect template. + GetDeidentifyTemplate(ctx context.Context, in *GetDeidentifyTemplateRequest, opts ...grpc.CallOption) (*DeidentifyTemplate, error) + // Lists inspect templates. + ListDeidentifyTemplates(ctx context.Context, in *ListDeidentifyTemplatesRequest, opts ...grpc.CallOption) (*ListDeidentifyTemplatesResponse, error) + // Deletes inspect templates. 
+ DeleteDeidentifyTemplate(ctx context.Context, in *DeleteDeidentifyTemplateRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Creates a job to run DLP actions such as scanning storage for sensitive + // information on a set schedule. + CreateJobTrigger(ctx context.Context, in *CreateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) + // Updates a job trigger. + UpdateJobTrigger(ctx context.Context, in *UpdateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) + // Gets a job trigger. + GetJobTrigger(ctx context.Context, in *GetJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) + // Lists job triggers. + ListJobTriggers(ctx context.Context, in *ListJobTriggersRequest, opts ...grpc.CallOption) (*ListJobTriggersResponse, error) + // Deletes a job trigger. + DeleteJobTrigger(ctx context.Context, in *DeleteJobTriggerRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Create a new job to inspect storage or calculate risk metrics [How-to + // guide](/dlp/docs/compute-risk-analysis). + CreateDlpJob(ctx context.Context, in *CreateDlpJobRequest, opts ...grpc.CallOption) (*DlpJob, error) + // Lists DlpJobs that match the specified filter in the request. + ListDlpJobs(ctx context.Context, in *ListDlpJobsRequest, opts ...grpc.CallOption) (*ListDlpJobsResponse, error) + // Gets the latest state of a long-running DlpJob. + GetDlpJob(ctx context.Context, in *GetDlpJobRequest, opts ...grpc.CallOption) (*DlpJob, error) + // Deletes a long-running DlpJob. This method indicates that the client is + // no longer interested in the DlpJob result. The job will be cancelled if + // possible. + DeleteDlpJob(ctx context.Context, in *DeleteDlpJobRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Starts asynchronous cancellation on a long-running DlpJob. The server + // makes a best effort to cancel the DlpJob, but success is not + // guaranteed. 
+ CancelDlpJob(ctx context.Context, in *CancelDlpJobRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) +} + +type dlpServiceClient struct { + cc *grpc.ClientConn +} + +func NewDlpServiceClient(cc *grpc.ClientConn) DlpServiceClient { + return &dlpServiceClient{cc} +} + +func (c *dlpServiceClient) InspectContent(ctx context.Context, in *InspectContentRequest, opts ...grpc.CallOption) (*InspectContentResponse, error) { + out := new(InspectContentResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/InspectContent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) RedactImage(ctx context.Context, in *RedactImageRequest, opts ...grpc.CallOption) (*RedactImageResponse, error) { + out := new(RedactImageResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/RedactImage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) DeidentifyContent(ctx context.Context, in *DeidentifyContentRequest, opts ...grpc.CallOption) (*DeidentifyContentResponse, error) { + out := new(DeidentifyContentResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/DeidentifyContent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ReidentifyContent(ctx context.Context, in *ReidentifyContentRequest, opts ...grpc.CallOption) (*ReidentifyContentResponse, error) { + out := new(ReidentifyContentResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/ReidentifyContent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListInfoTypes(ctx context.Context, in *ListInfoTypesRequest, opts ...grpc.CallOption) (*ListInfoTypesResponse, error) { + out := new(ListInfoTypesResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/ListInfoTypes", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) CreateInspectTemplate(ctx context.Context, in *CreateInspectTemplateRequest, opts ...grpc.CallOption) (*InspectTemplate, error) { + out := new(InspectTemplate) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/CreateInspectTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) UpdateInspectTemplate(ctx context.Context, in *UpdateInspectTemplateRequest, opts ...grpc.CallOption) (*InspectTemplate, error) { + out := new(InspectTemplate) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/UpdateInspectTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) GetInspectTemplate(ctx context.Context, in *GetInspectTemplateRequest, opts ...grpc.CallOption) (*InspectTemplate, error) { + out := new(InspectTemplate) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/GetInspectTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListInspectTemplates(ctx context.Context, in *ListInspectTemplatesRequest, opts ...grpc.CallOption) (*ListInspectTemplatesResponse, error) { + out := new(ListInspectTemplatesResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/ListInspectTemplates", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) DeleteInspectTemplate(ctx context.Context, in *DeleteInspectTemplateRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/DeleteInspectTemplate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) CreateDeidentifyTemplate(ctx context.Context, in *CreateDeidentifyTemplateRequest, opts ...grpc.CallOption) (*DeidentifyTemplate, error) { + out := new(DeidentifyTemplate) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/CreateDeidentifyTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) UpdateDeidentifyTemplate(ctx context.Context, in *UpdateDeidentifyTemplateRequest, opts ...grpc.CallOption) (*DeidentifyTemplate, error) { + out := new(DeidentifyTemplate) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/UpdateDeidentifyTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) GetDeidentifyTemplate(ctx context.Context, in *GetDeidentifyTemplateRequest, opts ...grpc.CallOption) (*DeidentifyTemplate, error) { + out := new(DeidentifyTemplate) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/GetDeidentifyTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListDeidentifyTemplates(ctx context.Context, in *ListDeidentifyTemplatesRequest, opts ...grpc.CallOption) (*ListDeidentifyTemplatesResponse, error) { + out := new(ListDeidentifyTemplatesResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/ListDeidentifyTemplates", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) DeleteDeidentifyTemplate(ctx context.Context, in *DeleteDeidentifyTemplateRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/DeleteDeidentifyTemplate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) CreateJobTrigger(ctx context.Context, in *CreateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) { + out := new(JobTrigger) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/CreateJobTrigger", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) UpdateJobTrigger(ctx context.Context, in *UpdateJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) { + out := new(JobTrigger) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/UpdateJobTrigger", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) GetJobTrigger(ctx context.Context, in *GetJobTriggerRequest, opts ...grpc.CallOption) (*JobTrigger, error) { + out := new(JobTrigger) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/GetJobTrigger", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListJobTriggers(ctx context.Context, in *ListJobTriggersRequest, opts ...grpc.CallOption) (*ListJobTriggersResponse, error) { + out := new(ListJobTriggersResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/ListJobTriggers", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) DeleteJobTrigger(ctx context.Context, in *DeleteJobTriggerRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/DeleteJobTrigger", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) CreateDlpJob(ctx context.Context, in *CreateDlpJobRequest, opts ...grpc.CallOption) (*DlpJob, error) { + out := new(DlpJob) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/CreateDlpJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) ListDlpJobs(ctx context.Context, in *ListDlpJobsRequest, opts ...grpc.CallOption) (*ListDlpJobsResponse, error) { + out := new(ListDlpJobsResponse) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/ListDlpJobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) GetDlpJob(ctx context.Context, in *GetDlpJobRequest, opts ...grpc.CallOption) (*DlpJob, error) { + out := new(DlpJob) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/GetDlpJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) DeleteDlpJob(ctx context.Context, in *DeleteDlpJobRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/DeleteDlpJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dlpServiceClient) CancelDlpJob(ctx context.Context, in *CancelDlpJobRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.privacy.dlp.v2.DlpService/CancelDlpJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DlpService service + +type DlpServiceServer interface { + // Finds potentially sensitive info in content. + // This method has limits on input size, processing time, and output size. 
+ // [How-to guide for text](/dlp/docs/inspecting-text), [How-to guide for + // images](/dlp/docs/inspecting-images) + InspectContent(context.Context, *InspectContentRequest) (*InspectContentResponse, error) + // Redacts potentially sensitive info from an image. + // This method has limits on input size, processing time, and output size. + // [How-to guide](/dlp/docs/redacting-sensitive-data-images) + RedactImage(context.Context, *RedactImageRequest) (*RedactImageResponse, error) + // De-identifies potentially sensitive info from a ContentItem. + // This method has limits on input size and output size. + // [How-to guide](/dlp/docs/deidentify-sensitive-data) + DeidentifyContent(context.Context, *DeidentifyContentRequest) (*DeidentifyContentResponse, error) + // Re-identify content that has been de-identified. + ReidentifyContent(context.Context, *ReidentifyContentRequest) (*ReidentifyContentResponse, error) + // Returns sensitive information types DLP supports. + ListInfoTypes(context.Context, *ListInfoTypesRequest) (*ListInfoTypesResponse, error) + // Creates an inspect template for re-using frequently used configuration + // for inspecting content, images, and storage. + CreateInspectTemplate(context.Context, *CreateInspectTemplateRequest) (*InspectTemplate, error) + // Updates the inspect template. + UpdateInspectTemplate(context.Context, *UpdateInspectTemplateRequest) (*InspectTemplate, error) + // Gets an inspect template. + GetInspectTemplate(context.Context, *GetInspectTemplateRequest) (*InspectTemplate, error) + // Lists inspect templates. + ListInspectTemplates(context.Context, *ListInspectTemplatesRequest) (*ListInspectTemplatesResponse, error) + // Deletes inspect templates. + DeleteInspectTemplate(context.Context, *DeleteInspectTemplateRequest) (*google_protobuf3.Empty, error) + // Creates an Deidentify template for re-using frequently used configuration + // for Deidentifying content, images, and storage. 
+ CreateDeidentifyTemplate(context.Context, *CreateDeidentifyTemplateRequest) (*DeidentifyTemplate, error) + // Updates the inspect template. + UpdateDeidentifyTemplate(context.Context, *UpdateDeidentifyTemplateRequest) (*DeidentifyTemplate, error) + // Gets an inspect template. + GetDeidentifyTemplate(context.Context, *GetDeidentifyTemplateRequest) (*DeidentifyTemplate, error) + // Lists inspect templates. + ListDeidentifyTemplates(context.Context, *ListDeidentifyTemplatesRequest) (*ListDeidentifyTemplatesResponse, error) + // Deletes inspect templates. + DeleteDeidentifyTemplate(context.Context, *DeleteDeidentifyTemplateRequest) (*google_protobuf3.Empty, error) + // Creates a job to run DLP actions such as scanning storage for sensitive + // information on a set schedule. + CreateJobTrigger(context.Context, *CreateJobTriggerRequest) (*JobTrigger, error) + // Updates a job trigger. + UpdateJobTrigger(context.Context, *UpdateJobTriggerRequest) (*JobTrigger, error) + // Gets a job trigger. + GetJobTrigger(context.Context, *GetJobTriggerRequest) (*JobTrigger, error) + // Lists job triggers. + ListJobTriggers(context.Context, *ListJobTriggersRequest) (*ListJobTriggersResponse, error) + // Deletes a job trigger. + DeleteJobTrigger(context.Context, *DeleteJobTriggerRequest) (*google_protobuf3.Empty, error) + // Create a new job to inspect storage or calculate risk metrics [How-to + // guide](/dlp/docs/compute-risk-analysis). + CreateDlpJob(context.Context, *CreateDlpJobRequest) (*DlpJob, error) + // Lists DlpJobs that match the specified filter in the request. + ListDlpJobs(context.Context, *ListDlpJobsRequest) (*ListDlpJobsResponse, error) + // Gets the latest state of a long-running DlpJob. + GetDlpJob(context.Context, *GetDlpJobRequest) (*DlpJob, error) + // Deletes a long-running DlpJob. This method indicates that the client is + // no longer interested in the DlpJob result. The job will be cancelled if + // possible. 
+ DeleteDlpJob(context.Context, *DeleteDlpJobRequest) (*google_protobuf3.Empty, error) + // Starts asynchronous cancellation on a long-running DlpJob. The server + // makes a best effort to cancel the DlpJob, but success is not + // guaranteed. + CancelDlpJob(context.Context, *CancelDlpJobRequest) (*google_protobuf3.Empty, error) +} + +func RegisterDlpServiceServer(s *grpc.Server, srv DlpServiceServer) { + s.RegisterService(&_DlpService_serviceDesc, srv) +} + +func _DlpService_InspectContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InspectContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).InspectContent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/InspectContent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).InspectContent(ctx, req.(*InspectContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_RedactImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RedactImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).RedactImage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/RedactImage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).RedactImage(ctx, req.(*RedactImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_DeidentifyContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(DeidentifyContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).DeidentifyContent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/DeidentifyContent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).DeidentifyContent(ctx, req.(*DeidentifyContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ReidentifyContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReidentifyContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ReidentifyContent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/ReidentifyContent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ReidentifyContent(ctx, req.(*ReidentifyContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListInfoTypes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInfoTypesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListInfoTypes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/ListInfoTypes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListInfoTypes(ctx, req.(*ListInfoTypesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_CreateInspectTemplate_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateInspectTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CreateInspectTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/CreateInspectTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CreateInspectTemplate(ctx, req.(*CreateInspectTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_UpdateInspectTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateInspectTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).UpdateInspectTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/UpdateInspectTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).UpdateInspectTemplate(ctx, req.(*UpdateInspectTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_GetInspectTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetInspectTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).GetInspectTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/GetInspectTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).GetInspectTemplate(ctx, 
req.(*GetInspectTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListInspectTemplates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListInspectTemplatesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListInspectTemplates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/ListInspectTemplates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListInspectTemplates(ctx, req.(*ListInspectTemplatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_DeleteInspectTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteInspectTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).DeleteInspectTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/DeleteInspectTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).DeleteInspectTemplate(ctx, req.(*DeleteInspectTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_CreateDeidentifyTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDeidentifyTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CreateDeidentifyTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.privacy.dlp.v2.DlpService/CreateDeidentifyTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CreateDeidentifyTemplate(ctx, req.(*CreateDeidentifyTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_UpdateDeidentifyTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDeidentifyTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).UpdateDeidentifyTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/UpdateDeidentifyTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).UpdateDeidentifyTemplate(ctx, req.(*UpdateDeidentifyTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_GetDeidentifyTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDeidentifyTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).GetDeidentifyTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/GetDeidentifyTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).GetDeidentifyTemplate(ctx, req.(*GetDeidentifyTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListDeidentifyTemplates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDeidentifyTemplatesRequest) + if 
err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListDeidentifyTemplates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/ListDeidentifyTemplates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListDeidentifyTemplates(ctx, req.(*ListDeidentifyTemplatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_DeleteDeidentifyTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDeidentifyTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).DeleteDeidentifyTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/DeleteDeidentifyTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).DeleteDeidentifyTemplate(ctx, req.(*DeleteDeidentifyTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_CreateJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CreateJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/CreateJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CreateJobTrigger(ctx, req.(*CreateJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_UpdateJobTrigger_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).UpdateJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/UpdateJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).UpdateJobTrigger(ctx, req.(*UpdateJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_GetJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).GetJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/GetJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).GetJobTrigger(ctx, req.(*GetJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListJobTriggers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobTriggersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListJobTriggers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/ListJobTriggers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListJobTriggers(ctx, req.(*ListJobTriggersRequest)) + } + return interceptor(ctx, in, info, 
handler) +} + +func _DlpService_DeleteJobTrigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteJobTriggerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).DeleteJobTrigger(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/DeleteJobTrigger", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).DeleteJobTrigger(ctx, req.(*DeleteJobTriggerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_CreateDlpJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDlpJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CreateDlpJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/CreateDlpJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CreateDlpJob(ctx, req.(*CreateDlpJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_ListDlpJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDlpJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).ListDlpJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/ListDlpJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).ListDlpJobs(ctx, req.(*ListDlpJobsRequest)) + } + 
return interceptor(ctx, in, info, handler) +} + +func _DlpService_GetDlpJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDlpJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).GetDlpJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/GetDlpJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).GetDlpJob(ctx, req.(*GetDlpJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_DeleteDlpJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDlpJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).DeleteDlpJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/DeleteDlpJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).DeleteDlpJob(ctx, req.(*DeleteDlpJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DlpService_CancelDlpJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelDlpJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DlpServiceServer).CancelDlpJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.privacy.dlp.v2.DlpService/CancelDlpJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DlpServiceServer).CancelDlpJob(ctx, req.(*CancelDlpJobRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +var _DlpService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.privacy.dlp.v2.DlpService", + HandlerType: (*DlpServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "InspectContent", + Handler: _DlpService_InspectContent_Handler, + }, + { + MethodName: "RedactImage", + Handler: _DlpService_RedactImage_Handler, + }, + { + MethodName: "DeidentifyContent", + Handler: _DlpService_DeidentifyContent_Handler, + }, + { + MethodName: "ReidentifyContent", + Handler: _DlpService_ReidentifyContent_Handler, + }, + { + MethodName: "ListInfoTypes", + Handler: _DlpService_ListInfoTypes_Handler, + }, + { + MethodName: "CreateInspectTemplate", + Handler: _DlpService_CreateInspectTemplate_Handler, + }, + { + MethodName: "UpdateInspectTemplate", + Handler: _DlpService_UpdateInspectTemplate_Handler, + }, + { + MethodName: "GetInspectTemplate", + Handler: _DlpService_GetInspectTemplate_Handler, + }, + { + MethodName: "ListInspectTemplates", + Handler: _DlpService_ListInspectTemplates_Handler, + }, + { + MethodName: "DeleteInspectTemplate", + Handler: _DlpService_DeleteInspectTemplate_Handler, + }, + { + MethodName: "CreateDeidentifyTemplate", + Handler: _DlpService_CreateDeidentifyTemplate_Handler, + }, + { + MethodName: "UpdateDeidentifyTemplate", + Handler: _DlpService_UpdateDeidentifyTemplate_Handler, + }, + { + MethodName: "GetDeidentifyTemplate", + Handler: _DlpService_GetDeidentifyTemplate_Handler, + }, + { + MethodName: "ListDeidentifyTemplates", + Handler: _DlpService_ListDeidentifyTemplates_Handler, + }, + { + MethodName: "DeleteDeidentifyTemplate", + Handler: _DlpService_DeleteDeidentifyTemplate_Handler, + }, + { + MethodName: "CreateJobTrigger", + Handler: _DlpService_CreateJobTrigger_Handler, + }, + { + MethodName: "UpdateJobTrigger", + Handler: _DlpService_UpdateJobTrigger_Handler, + }, + { + MethodName: "GetJobTrigger", + Handler: _DlpService_GetJobTrigger_Handler, + }, + { + MethodName: "ListJobTriggers", + 
Handler: _DlpService_ListJobTriggers_Handler, + }, + { + MethodName: "DeleteJobTrigger", + Handler: _DlpService_DeleteJobTrigger_Handler, + }, + { + MethodName: "CreateDlpJob", + Handler: _DlpService_CreateDlpJob_Handler, + }, + { + MethodName: "ListDlpJobs", + Handler: _DlpService_ListDlpJobs_Handler, + }, + { + MethodName: "GetDlpJob", + Handler: _DlpService_GetDlpJob_Handler, + }, + { + MethodName: "DeleteDlpJob", + Handler: _DlpService_DeleteDlpJob_Handler, + }, + { + MethodName: "CancelDlpJob", + Handler: _DlpService_CancelDlpJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/privacy/dlp/v2/dlp.proto", +} + +func init() { proto.RegisterFile("google/privacy/dlp/v2/dlp.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 7980 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x7d, 0x6d, 0x6c, 0x1b, 0xd9, + 0x76, 0x98, 0x86, 0x14, 0x25, 0xea, 0x50, 0xa4, 0xa8, 0x2b, 0x4b, 0x96, 0x69, 0x7b, 0xed, 0x1d, + 0xef, 0x7a, 0xbd, 0x5a, 0x5b, 0xda, 0xd5, 0x7e, 0xbc, 0x5d, 0xef, 0xbe, 0x4d, 0x28, 0x92, 0x16, + 0xe9, 0x95, 0x44, 0x79, 0x48, 0x79, 0xd7, 0xce, 0x62, 0xa7, 0x23, 0xf2, 0x8a, 0x1a, 0x8b, 0xe4, + 0xd0, 0x33, 0x43, 0x5b, 0xda, 0x24, 0xc0, 0x4b, 0xd1, 0x36, 0x48, 0xd1, 0x87, 0x06, 0x68, 0x5f, + 0x1f, 0x9a, 0xb4, 0x41, 0xda, 0xfe, 0x28, 0x10, 0x20, 0x2d, 0x8a, 0xb6, 0x09, 0xd2, 0xa6, 0xfd, + 0x91, 0x7e, 0xa2, 0x28, 0x8a, 0x17, 0xbc, 0x5f, 0x45, 0x51, 0x20, 0x7d, 0xfd, 0x40, 0x81, 0xcd, + 0x9f, 0x02, 0xfd, 0xd1, 0x0f, 0xa0, 0x28, 0xee, 0xd7, 0x7c, 0x71, 0x86, 0x1f, 0xb2, 0x17, 0x09, + 0xf2, 0x4b, 0x9c, 0x73, 0xcf, 0x39, 0xf7, 0xdc, 0x73, 0xcf, 0x3d, 0xf7, 0xdc, 0x73, 0x3f, 0x04, + 0xd7, 0x5a, 0x86, 0xd1, 0x6a, 0xe3, 0x8d, 0x9e, 0xa9, 0x3f, 0xd3, 0x1a, 0x67, 0x1b, 0xcd, 0x76, + 0x6f, 0xe3, 0xd9, 0x26, 0xf9, 0xb3, 0xde, 0x33, 0x0d, 0xdb, 0x40, 0xcb, 0x0c, 0x61, 0x9d, 0x23, + 0xac, 0x93, 0x92, 0x67, 0x9b, 0xb9, 0x2b, 0x9c, 0x4e, 0xeb, 0xe9, 0x1b, 0x5a, 0xb7, 0x6b, 0xd8, + 0x9a, 
0xad, 0x1b, 0x5d, 0x8b, 0x11, 0xe5, 0x6e, 0x84, 0x73, 0xb5, 0x6c, 0xc3, 0xd4, 0x5a, 0x98, + 0x23, 0xbd, 0xe2, 0x20, 0x19, 0xb6, 0x71, 0xd8, 0x3f, 0xda, 0x68, 0xf6, 0x4d, 0xca, 0x85, 0x97, + 0x5f, 0x0e, 0x96, 0xe3, 0x4e, 0xcf, 0x3e, 0xe3, 0x85, 0xd7, 0x83, 0x85, 0x47, 0x3a, 0x6e, 0x37, + 0xd5, 0x8e, 0x66, 0x9d, 0x70, 0x8c, 0x6b, 0x41, 0x0c, 0x5b, 0xef, 0x60, 0xcb, 0xd6, 0x3a, 0xbc, + 0x65, 0xb9, 0x8b, 0x1c, 0xc1, 0xec, 0x35, 0x36, 0x2c, 0x5b, 0xb3, 0xfb, 0x42, 0xfa, 0x15, 0x5e, + 0x60, 0x9f, 0xf5, 0xf0, 0x46, 0x53, 0xb3, 0x71, 0x40, 0x20, 0x0e, 0x3f, 0x33, 0x8e, 0x9e, 0x63, + 0x7c, 0x12, 0x56, 0x48, 0xaa, 0x32, 0x8e, 0x9a, 0x1a, 0x97, 0x56, 0xfe, 0xd7, 0x33, 0x90, 0xae, + 0x74, 0xad, 0x1e, 0x6e, 0xd8, 0x05, 0xa3, 0x7b, 0xa4, 0xb7, 0xd0, 0xa7, 0x00, 0x7a, 0xf7, 0xc8, + 0x50, 0x09, 0xba, 0xb5, 0x2a, 0x5d, 0x8f, 0xdf, 0x4a, 0x6d, 0x5e, 0x5b, 0x0f, 0xd5, 0xf5, 0x7a, + 0xa5, 0x7b, 0x64, 0xd4, 0xcf, 0x7a, 0x58, 0x99, 0xd3, 0xf9, 0x2f, 0x0b, 0x95, 0x21, 0xd3, 0xd1, + 0xbb, 0x6a, 0x5b, 0x3f, 0xc1, 0x6d, 0xfd, 0xd8, 0x30, 0x9a, 0xab, 0xb1, 0xeb, 0xd2, 0xad, 0xcc, + 0xe6, 0xab, 0x11, 0x3c, 0x76, 0x1c, 0x44, 0x25, 0xdd, 0xd1, 0xbb, 0xee, 0x27, 0xba, 0x0f, 0x33, + 0x6d, 0xbd, 0xa3, 0xdb, 0xd6, 0x6a, 0xfc, 0xba, 0x74, 0x2b, 0xb5, 0xb9, 0x19, 0x29, 0x85, 0x47, + 0xfe, 0xf5, 0x7b, 0x7a, 0xb7, 0xa9, 0x77, 0x5b, 0x3b, 0x94, 0x52, 0xe1, 0x1c, 0xd0, 0x0d, 0x48, + 0xeb, 0xdd, 0x46, 0xbb, 0xdf, 0xc4, 0xea, 0xd3, 0xbe, 0x61, 0xe3, 0xd5, 0xe9, 0xeb, 0xd2, 0xad, + 0xa4, 0x32, 0xcf, 0x81, 0x0f, 0x08, 0x0c, 0xdd, 0x06, 0x84, 0x4f, 0x19, 0x92, 0x47, 0x05, 0x09, + 0x8a, 0x99, 0xe5, 0x25, 0x15, 0xa7, 0xa1, 0x0f, 0x60, 0xb1, 0xd1, 0xb7, 0x6c, 0xa3, 0xe3, 0x45, + 0x9e, 0xa1, 0xfa, 0x7a, 0x3d, 0x42, 0xd2, 0x02, 0xc5, 0x77, 0xb4, 0xb6, 0xd0, 0xf0, 0x7d, 0x5b, + 0x68, 0x17, 0x16, 0x1a, 0x46, 0xd7, 0xc6, 0x5d, 0x5b, 0x35, 0x7a, 0xd4, 0x6c, 0x57, 0x93, 0xd7, + 0xe3, 0xb7, 0x32, 0x9b, 0xaf, 0x45, 0x31, 0x64, 0xd8, 0x55, 0x8a, 0xac, 0x64, 0x1a, 0xde, 0x4f, + 0x2b, 0xf7, 0x4d, 0x0c, 0xd2, 0x3e, 0x75, 
0xa0, 0x77, 0x60, 0xb9, 0xa3, 0x9d, 0xaa, 0x47, 0x0c, + 0x68, 0xa9, 0x3d, 0x6c, 0xaa, 0xba, 0x8d, 0x3b, 0xab, 0xd2, 0x75, 0xe9, 0x56, 0x42, 0x41, 0x1d, + 0xed, 0x94, 0x13, 0x58, 0xfb, 0xd8, 0xac, 0xd8, 0xb8, 0x83, 0xbe, 0x03, 0xab, 0x03, 0x24, 0x26, + 0x7e, 0xda, 0xc7, 0x96, 0x4d, 0x7b, 0x36, 0xa1, 0x2c, 0xfb, 0xa9, 0x14, 0x56, 0x88, 0x7e, 0x0e, + 0x72, 0x83, 0x75, 0x09, 0x4d, 0xad, 0xc6, 0xa9, 0xa2, 0x7e, 0x7a, 0xf2, 0x2e, 0x75, 0x8c, 0x8e, + 0x7e, 0x2a, 0x2b, 0x01, 0x91, 0x79, 0x61, 0xae, 0x47, 0xec, 0xda, 0x83, 0x88, 0x3e, 0x81, 0x39, + 0xb7, 0x76, 0x89, 0x1a, 0xd4, 0x48, 0xb3, 0x4e, 0x0a, 0xb3, 0x46, 0xaf, 0xc2, 0xbc, 0xb7, 0x31, + 0xbc, 0xe5, 0x29, 0x4f, 0xe5, 0xf2, 0x1f, 0x48, 0xb0, 0xb0, 0x75, 0x66, 0x63, 0xde, 0x27, 0x54, + 0x79, 0x45, 0x98, 0x76, 0xea, 0xcb, 0x6c, 0xbe, 0x1d, 0x51, 0x5f, 0x80, 0x8a, 0x7e, 0x5b, 0x54, + 0x00, 0x4a, 0x8d, 0x10, 0x4c, 0x37, 0x35, 0x5b, 0xa3, 0x95, 0xce, 0x2b, 0xf4, 0xb7, 0x6c, 0xc1, + 0x9c, 0x83, 0x86, 0x72, 0xb0, 0xb2, 0xf5, 0xa8, 0x5e, 0xaa, 0xa9, 0xf5, 0x47, 0xfb, 0x25, 0xf5, + 0x60, 0xaf, 0xb6, 0x5f, 0x2a, 0x54, 0xee, 0x55, 0x4a, 0xc5, 0xec, 0x14, 0xca, 0x00, 0x54, 0x76, + 0xf3, 0xdb, 0x25, 0xf5, 0xfe, 0x7e, 0x69, 0x3b, 0x2b, 0xa1, 0x34, 0xcc, 0xb1, 0xef, 0xad, 0xdd, + 0xfd, 0x6c, 0xcc, 0xfd, 0xdc, 0xdf, 0xdb, 0xce, 0xc6, 0xdd, 0xcf, 0xda, 0xc3, 0xed, 0xec, 0x34, + 0xf9, 0xac, 0x97, 0xbe, 0xa8, 0xab, 0x07, 0xf5, 0x7b, 0x1f, 0x66, 0x13, 0xf2, 0xdf, 0x95, 0x20, + 0xe5, 0x6d, 0xde, 0x0a, 0x24, 0x9e, 0x69, 0xed, 0x3e, 0xa6, 0x03, 0x74, 0xae, 0x3c, 0xa5, 0xb0, + 0x4f, 0xf4, 0x1e, 0x24, 0x6c, 0xed, 0xb0, 0xcd, 0x46, 0x59, 0x6a, 0xf3, 0x4a, 0x44, 0xbb, 0xeb, + 0x04, 0x87, 0x50, 0x51, 0x64, 0x54, 0x82, 0xb9, 0xc3, 0x33, 0x1b, 0x33, 0x83, 0x4c, 0x50, 0xca, + 0x9b, 0xe3, 0x69, 0xac, 0x3c, 0xa5, 0x24, 0x09, 0x29, 0xf9, 0xbd, 0x95, 0x82, 0x39, 0xa2, 0x21, + 0xca, 0x46, 0xfe, 0x07, 0x12, 0x24, 0x68, 0x35, 0xe8, 0x43, 0x98, 0x3d, 0xc6, 0x5a, 0x13, 0x9b, + 0xc2, 0xa9, 0xbd, 0x12, 0xc1, 0xfb, 0x1e, 0xf1, 0xd7, 0x95, 0xa6, 0x22, 0xd0, 
0xd1, 0x7b, 0x30, + 0x6d, 0x1a, 0xcf, 0x49, 0x9f, 0x13, 0xb2, 0xeb, 0xc3, 0x1a, 0xb3, 0xae, 0x18, 0xcf, 0x15, 0x8a, + 0x9d, 0xfb, 0x18, 0xe2, 0x8a, 0xf1, 0x1c, 0xbd, 0x07, 0x33, 0x54, 0x27, 0xa2, 0xd6, 0x28, 0x5d, + 0x3c, 0x24, 0x48, 0x0a, 0xc7, 0x95, 0xbf, 0x76, 0xbc, 0xb2, 0x82, 0xad, 0x7e, 0xdb, 0x46, 0x77, + 0x21, 0xe9, 0xd8, 0xde, 0x28, 0xf1, 0x29, 0x9a, 0xe2, 0xe0, 0xa3, 0x3b, 0x80, 0x9c, 0x41, 0x68, + 0x9b, 0xfd, 0x6e, 0x43, 0xb3, 0x31, 0xf3, 0xca, 0x49, 0x65, 0x51, 0x94, 0xd4, 0x45, 0x81, 0xfc, + 0x1f, 0x62, 0x30, 0xcb, 0x99, 0xa0, 0x0b, 0x90, 0x60, 0xee, 0x92, 0x18, 0xf0, 0x9c, 0xc2, 0x3e, + 0xfc, 0x43, 0x29, 0x36, 0xe9, 0x50, 0xca, 0x03, 0x78, 0x26, 0x87, 0xf8, 0xb8, 0x93, 0x83, 0x87, + 0x08, 0x7d, 0x0c, 0xc9, 0xb6, 0xd1, 0xa0, 0x53, 0x32, 0x37, 0xb1, 0xa8, 0xfa, 0x77, 0x38, 0x9a, + 0xe2, 0x10, 0xa0, 0x8f, 0x21, 0xd5, 0x30, 0xb1, 0x66, 0x63, 0x95, 0x4c, 0x86, 0xab, 0x33, 0x94, + 0x3e, 0xe7, 0xd2, 0xb3, 0x49, 0x79, 0xbd, 0x2e, 0x26, 0x65, 0x05, 0x18, 0x3a, 0x01, 0xa0, 0x9f, + 0x02, 0xa0, 0x3a, 0xa0, 0x9e, 0x6c, 0x75, 0x96, 0xd2, 0x46, 0x59, 0x04, 0x9d, 0x54, 0x88, 0x02, + 0x94, 0xb9, 0xa7, 0xe2, 0xa7, 0xfc, 0x9f, 0x25, 0x48, 0xee, 0xb8, 0xa2, 0x00, 0xb5, 0x78, 0x53, + 0xeb, 0xb6, 0x84, 0x53, 0x8a, 0x32, 0x10, 0x85, 0xe0, 0x28, 0x74, 0x84, 0xd0, 0x9f, 0xa8, 0x44, + 0x26, 0x8b, 0x26, 0xee, 0x19, 0x7a, 0xd7, 0xe6, 0x1c, 0x62, 0x63, 0x70, 0xc8, 0x38, 0x44, 0x8c, + 0x4d, 0x0d, 0x16, 0xc5, 0x9c, 0x23, 0x54, 0x64, 0xad, 0xce, 0x52, 0x13, 0xbb, 0x39, 0x7c, 0xd6, + 0x71, 0x74, 0x9b, 0x6d, 0xf8, 0x01, 0x96, 0xfc, 0xdb, 0x71, 0x58, 0x08, 0x60, 0xa1, 0xd7, 0x81, + 0xce, 0x4f, 0x9a, 0xde, 0xc5, 0xa6, 0xda, 0xd5, 0x3a, 0xc2, 0xa8, 0xd2, 0x0e, 0x74, 0x4f, 0xeb, + 0x60, 0xb4, 0x0f, 0x0b, 0x26, 0x6e, 0x18, 0x66, 0xd3, 0x11, 0x87, 0x37, 0x2b, 0x6a, 0x52, 0x55, + 0x28, 0xb6, 0xa8, 0xa6, 0x3c, 0xa5, 0x64, 0x4c, 0x1f, 0x04, 0xed, 0x42, 0x46, 0xef, 0x68, 0x2d, + 0xec, 0x32, 0x64, 0xf1, 0x44, 0xd4, 0xa4, 0x5a, 0x21, 0xc8, 0x1e, 0x7e, 0x69, 0xdd, 0x0b, 0x40, + 0x0f, 0x61, 0xb1, 
0x69, 0x34, 0xfa, 0x1d, 0xaf, 0xc6, 0xb8, 0xbb, 0x7a, 0x23, 0x82, 0x63, 0x91, + 0xe3, 0x7b, 0x98, 0x66, 0x9b, 0x01, 0x18, 0xfa, 0x0c, 0x96, 0x5c, 0xfd, 0x38, 0x21, 0xe1, 0x18, + 0xf6, 0x89, 0x1c, 0x32, 0x07, 0x86, 0xde, 0x62, 0xbd, 0xca, 0x98, 0x3d, 0xc3, 0xa6, 0x45, 0x84, + 0x9c, 0xa5, 0xfa, 0xce, 0x3a, 0x05, 0x0f, 0x19, 0x7c, 0x0b, 0xdc, 0xe1, 0x24, 0xbf, 0x0b, 0xd9, + 0xa0, 0xb4, 0xe8, 0x1a, 0xa4, 0x8e, 0xf4, 0x36, 0x56, 0x8d, 0xa3, 0x23, 0x0b, 0xdb, 0xb4, 0xdb, + 0xe2, 0x0a, 0x10, 0x50, 0x95, 0x42, 0xe4, 0x7f, 0x2f, 0x41, 0xc6, 0xdf, 0x0d, 0x64, 0xa0, 0xf0, + 0x6e, 0x3c, 0xc1, 0x67, 0xdc, 0xb4, 0xaf, 0x0f, 0xed, 0xc1, 0xcf, 0xf0, 0x99, 0x32, 0x67, 0x8a, + 0x9f, 0xe8, 0x23, 0xe2, 0xf1, 0x48, 0xe4, 0xac, 0x37, 0xb9, 0x01, 0x8c, 0x74, 0xd8, 0x47, 0xec, + 0x07, 0xfa, 0x0c, 0x32, 0x74, 0x46, 0x19, 0xb7, 0xc3, 0xa9, 0xeb, 0x76, 0xac, 0x39, 0x6d, 0x7b, + 0x3f, 0xe5, 0xdb, 0x90, 0xf6, 0x95, 0xa3, 0xcb, 0x30, 0x67, 0x1a, 0xcf, 0x55, 0xbd, 0xdb, 0xc4, + 0xa7, 0x5c, 0x17, 0x49, 0xd3, 0x78, 0x5e, 0x21, 0xdf, 0xf2, 0x06, 0x24, 0xd8, 0xb0, 0xba, 0x00, + 0x09, 0xcb, 0xd6, 0x4c, 0xa1, 0x2d, 0xf6, 0x81, 0xb2, 0x10, 0xc7, 0x5d, 0xd6, 0x9e, 0xb8, 0x42, + 0x7e, 0xca, 0x8f, 0x21, 0xed, 0xb3, 0x37, 0x54, 0x81, 0xcc, 0xa1, 0xd1, 0xa7, 0xee, 0x57, 0x3d, + 0x34, 0x4e, 0x9d, 0x89, 0x43, 0x8e, 0x9a, 0x0a, 0x39, 0xf2, 0x96, 0x71, 0xaa, 0xa4, 0x0f, 0xdd, + 0x0f, 0x6c, 0xc9, 0x1a, 0xa4, 0x3c, 0xa5, 0xa4, 0x72, 0xdb, 0xe8, 0xf1, 0x50, 0x8f, 0xfc, 0x24, + 0x81, 0x45, 0x1b, 0x1f, 0x89, 0x38, 0x8e, 0xfe, 0x26, 0x82, 0x3f, 0xd7, 0x9b, 0xf6, 0x31, 0xd5, + 0x59, 0x42, 0x61, 0x1f, 0x68, 0x05, 0x66, 0x8e, 0xb1, 0xde, 0x3a, 0xb6, 0xa9, 0xbf, 0x4d, 0x28, + 0xfc, 0x4b, 0xfe, 0xf3, 0xd3, 0x80, 0x14, 0xdc, 0xd4, 0x1a, 0x36, 0x6d, 0x85, 0x88, 0xfd, 0x56, + 0x60, 0xa6, 0xa7, 0x99, 0xb8, 0x6b, 0xf3, 0x31, 0xce, 0xbf, 0x48, 0xcf, 0xe8, 0x6c, 0x5e, 0x53, + 0x1b, 0x34, 0xb6, 0xe3, 0x5d, 0xfb, 0xda, 0x38, 0x71, 0xa0, 0x92, 0xd6, 0x7d, 0x2b, 0x95, 0x67, + 0x70, 0x91, 0x8d, 0x6b, 0x93, 0x0a, 0xa0, 0x1b, 0x5d, 
0xce, 0x94, 0xc4, 0xec, 0x44, 0x65, 0x9f, + 0x46, 0xda, 0x5b, 0x50, 0xe0, 0x75, 0xfe, 0xc1, 0xf9, 0xf0, 0xfa, 0x96, 0xf5, 0x10, 0xa8, 0x85, + 0x0a, 0xde, 0x38, 0x65, 0x76, 0x92, 0x38, 0xc5, 0x8d, 0x52, 0x72, 0xbf, 0x2f, 0xc1, 0x85, 0xb0, + 0x4a, 0xd1, 0xa7, 0x93, 0xc7, 0xa9, 0x24, 0xfc, 0x71, 0xa6, 0xd7, 0x5b, 0xc4, 0x7f, 0x12, 0x96, + 0xaa, 0xd6, 0x6e, 0xab, 0x36, 0x3e, 0x65, 0xdd, 0x9b, 0x24, 0x8e, 0x8c, 0x15, 0xe4, 0xdb, 0xed, + 0x3a, 0x3e, 0xb5, 0xc9, 0x04, 0xe2, 0xd5, 0x5c, 0xdb, 0x30, 0xf9, 0x38, 0xb9, 0x12, 0xe9, 0xf7, + 0xdb, 0x86, 0x49, 0xdc, 0xab, 0x23, 0x71, 0xdb, 0x30, 0xb7, 0x92, 0x30, 0x63, 0x6b, 0x66, 0x0b, + 0xdb, 0x72, 0x01, 0x12, 0x14, 0x44, 0x2c, 0xcd, 0xc4, 0x4d, 0x2a, 0x7d, 0x4c, 0x21, 0x3f, 0x89, + 0x55, 0xb5, 0x4c, 0x8c, 0x99, 0x2f, 0x8f, 0x29, 0xec, 0x83, 0xd8, 0xdf, 0xa1, 0x08, 0x1f, 0x63, + 0x0a, 0xfd, 0x2d, 0x37, 0x60, 0xc9, 0xd7, 0x3f, 0x56, 0xcf, 0xe8, 0x5a, 0x98, 0xcc, 0x1e, 0xac, + 0x5e, 0xdc, 0x54, 0x69, 0xb7, 0x50, 0xee, 0xf3, 0xa2, 0x4d, 0xb8, 0x49, 0xd1, 0x09, 0x1a, 0x3e, + 0xb5, 0x4d, 0x86, 0xe7, 0x34, 0x7e, 0x4e, 0x49, 0x3b, 0x50, 0xd2, 0x74, 0xf9, 0xff, 0xc6, 0x60, + 0xb5, 0x88, 0xf5, 0x26, 0xee, 0xda, 0xfa, 0xd1, 0x19, 0xef, 0xa1, 0x51, 0xc6, 0x5b, 0x87, 0xc5, + 0xa6, 0x43, 0xe3, 0xb7, 0xdf, 0x48, 0xc7, 0xef, 0xad, 0x83, 0x98, 0x54, 0xb6, 0x19, 0x80, 0x84, + 0x0c, 0x89, 0xf8, 0xf9, 0x87, 0xc4, 0x07, 0x30, 0x4d, 0xad, 0x92, 0x05, 0x45, 0xf2, 0xf0, 0xf9, + 0x9b, 0x5a, 0x24, 0xc5, 0x47, 0x9b, 0xb0, 0x2c, 0x84, 0xb0, 0x71, 0xa7, 0xd7, 0x26, 0xd1, 0x11, + 0x9d, 0xa2, 0x13, 0x54, 0x03, 0x4b, 0xbc, 0xb0, 0xce, 0xcb, 0xe8, 0x44, 0xfd, 0x21, 0xac, 0x7a, + 0xd4, 0xe1, 0x27, 0x9b, 0xa1, 0x64, 0x2b, 0x6e, 0xb9, 0x97, 0x52, 0xfe, 0x35, 0x09, 0x2e, 0x85, + 0x68, 0x9f, 0xf7, 0xb4, 0x68, 0x83, 0x34, 0x61, 0x1b, 0x2a, 0x90, 0x34, 0x9e, 0x61, 0xf3, 0x99, + 0x8e, 0x9f, 0xf3, 0x5e, 0xb9, 0x13, 0xe5, 0xef, 0x4d, 0xad, 0x6b, 0x1d, 0x19, 0x66, 0x87, 0x7a, + 0xdc, 0x2a, 0x27, 0x52, 0x1c, 0x72, 0x6a, 0x1e, 0xca, 0x39, 0xcc, 0xc3, 0x7c, 0x61, 0xf3, 
0x30, + 0xff, 0x24, 0x99, 0x87, 0x39, 0xc2, 0x3c, 0xcc, 0x68, 0xf3, 0x50, 0xfe, 0x38, 0x9b, 0xc7, 0x7f, + 0x97, 0x60, 0xd9, 0xd5, 0xf3, 0x38, 0xb6, 0xf1, 0x52, 0xe7, 0x3d, 0xa1, 0x81, 0xf8, 0xcb, 0xea, + 0xc5, 0xe9, 0xc8, 0x5e, 0x94, 0x1f, 0xc2, 0x4a, 0xb0, 0xa5, 0xbc, 0x1f, 0x3e, 0x81, 0x19, 0x93, + 0xae, 0x4d, 0x79, 0x4f, 0x8c, 0x68, 0x0a, 0x5b, 0xc7, 0x2a, 0x9c, 0x46, 0xfe, 0xb7, 0x31, 0x58, + 0xaa, 0xf6, 0xed, 0x5e, 0xdf, 0xae, 0xb1, 0xd4, 0x2b, 0x6f, 0xdb, 0x27, 0x22, 0x73, 0x30, 0x9c, + 0xe9, 0x96, 0xde, 0x7a, 0xd0, 0xc7, 0xe6, 0x59, 0x20, 0x83, 0xf0, 0x25, 0xa4, 0x0d, 0xca, 0x54, + 0xb5, 0x1a, 0xc7, 0xb8, 0xa3, 0xf1, 0xd5, 0xe5, 0x77, 0x22, 0xb8, 0x84, 0x08, 0x20, 0x60, 0x94, + 0x5c, 0x99, 0x37, 0x3c, 0x5f, 0xf2, 0x2f, 0x4b, 0x30, 0xef, 0x2d, 0x46, 0x57, 0xe1, 0x52, 0xf5, + 0xa0, 0xbe, 0x7f, 0x50, 0x57, 0x6b, 0x85, 0x72, 0x69, 0x37, 0x1f, 0xc8, 0xbc, 0x2c, 0x42, 0x7a, + 0x2b, 0x5f, 0xab, 0x14, 0xd4, 0x42, 0x75, 0xe7, 0x60, 0x77, 0xaf, 0x96, 0x95, 0xd0, 0x02, 0xa4, + 0xb6, 0x0b, 0x35, 0x07, 0x10, 0x43, 0xcb, 0xb0, 0x58, 0xcc, 0xd7, 0xf3, 0xb5, 0x7a, 0x55, 0x29, + 0x39, 0xe0, 0x38, 0x01, 0x6f, 0x55, 0xb6, 0xd5, 0x07, 0x07, 0x25, 0xe5, 0x91, 0x03, 0x9e, 0x26, + 0xe4, 0xf9, 0x9d, 0x1d, 0x07, 0x90, 0xd8, 0x9a, 0x61, 0xf9, 0x25, 0xb9, 0xe1, 0x66, 0xbb, 0x6a, + 0xb6, 0x66, 0x5b, 0x2f, 0x98, 0xed, 0xba, 0x00, 0x89, 0x86, 0xd1, 0xef, 0xda, 0x3c, 0x50, 0x65, + 0x1f, 0xf2, 0x8f, 0xa6, 0x61, 0x95, 0xf7, 0x66, 0x51, 0xb3, 0xb5, 0x9a, 0xd1, 0x37, 0x1b, 0xb8, + 0x88, 0x6d, 0x4d, 0x6f, 0x5b, 0xa8, 0x43, 0xbc, 0x1f, 0x1d, 0x04, 0xb8, 0xe9, 0x24, 0x2f, 0x99, + 0x91, 0x8f, 0x48, 0xf2, 0x0d, 0xf0, 0x5a, 0x57, 0x04, 0x23, 0x9e, 0xc8, 0x24, 0x6e, 0xd1, 0x0f, + 0x41, 0x7b, 0x8e, 0xf5, 0xb1, 0x51, 0xf0, 0xc1, 0xe4, 0x75, 0x78, 0xed, 0x31, 0xf7, 0x4f, 0x25, + 0xc8, 0x06, 0xab, 0x45, 0x87, 0x70, 0xc9, 0xea, 0x6a, 0x3d, 0xeb, 0xd8, 0xb0, 0xd5, 0xe0, 0xc8, + 0xe1, 0x4a, 0xbd, 0x39, 0xbc, 0x5e, 0x31, 0x96, 0x94, 0x8b, 0x82, 0x51, 0xa0, 0x00, 0xdd, 0x03, + 0x78, 0x62, 0x1c, 0xfa, 0x7d, 
0xfb, 0x1b, 0xc3, 0x99, 0xde, 0x37, 0x0e, 0xb9, 0x63, 0x98, 0x7b, + 0x22, 0x7e, 0xe6, 0xfe, 0xbe, 0x04, 0x33, 0x3c, 0x57, 0xf4, 0x06, 0x2c, 0xf4, 0x4c, 0xa3, 0x81, + 0x2d, 0x0b, 0x37, 0x55, 0x12, 0x70, 0x5a, 0x7c, 0x11, 0x92, 0x71, 0xc0, 0x34, 0x75, 0x48, 0x1c, + 0x82, 0x6d, 0xd8, 0x5a, 0x5b, 0xc5, 0x96, 0xad, 0x77, 0x34, 0xdb, 0x41, 0x67, 0xdd, 0xbe, 0x44, + 0x0b, 0x4b, 0xa2, 0x8c, 0xd1, 0xec, 0xc0, 0x82, 0x63, 0x58, 0xaa, 0x45, 0x6c, 0x8d, 0xa7, 0x72, + 0x5f, 0x1b, 0x61, 0x5e, 0xd4, 0x2e, 0x89, 0x2b, 0xf3, 0x7c, 0xca, 0xbf, 0x2a, 0xc1, 0x92, 0x40, + 0x28, 0x62, 0xab, 0x61, 0xea, 0x54, 0xf5, 0x24, 0x30, 0xf4, 0x64, 0x08, 0xe8, 0x6f, 0xf4, 0x2a, + 0xcc, 0x37, 0x75, 0xab, 0xd7, 0xd6, 0xce, 0x98, 0xd7, 0x62, 0x81, 0x5d, 0x8a, 0xc3, 0xe8, 0x9c, + 0xb3, 0x0b, 0xf3, 0x56, 0xbf, 0xd7, 0x33, 0x4c, 0xd6, 0x14, 0x2a, 0x59, 0x66, 0x73, 0x6d, 0x94, + 0x64, 0x82, 0x64, 0xeb, 0x4c, 0x49, 0x59, 0xee, 0x87, 0x5c, 0x83, 0x0b, 0x3b, 0xba, 0x65, 0x3b, + 0xf9, 0x79, 0xe1, 0xe5, 0x6f, 0x40, 0xba, 0xad, 0x75, 0x5b, 0x7d, 0xb2, 0xf6, 0x68, 0x18, 0x4d, + 0x21, 0xe6, 0xbc, 0x00, 0x16, 0x8c, 0x26, 0x26, 0x53, 0xc1, 0x91, 0xde, 0xb6, 0xb1, 0xc9, 0x05, + 0xe5, 0x5f, 0xf2, 0x21, 0x2c, 0x07, 0x98, 0x72, 0x87, 0x5a, 0x09, 0xd9, 0x78, 0x19, 0x25, 0xba, + 0x47, 0x67, 0x9e, 0x3d, 0x18, 0xf9, 0xbf, 0x49, 0xb0, 0xac, 0xe8, 0xd6, 0x49, 0xbe, 0xab, 0xb5, + 0xcf, 0x2c, 0xdd, 0x72, 0x2c, 0x86, 0x4c, 0x44, 0x9c, 0x95, 0xda, 0xc1, 0xb6, 0xa9, 0x37, 0x46, + 0x38, 0xda, 0x7d, 0xf6, 0xb9, 0x4b, 0x71, 0x95, 0x74, 0xcf, 0xfb, 0x89, 0xb6, 0x61, 0xde, 0xa2, + 0x83, 0x4a, 0x65, 0x3e, 0x3b, 0x36, 0xbe, 0xcf, 0x56, 0x52, 0x8c, 0x92, 0xe5, 0x66, 0xbf, 0x03, + 0xb3, 0x6c, 0x45, 0x21, 0x8c, 0xe9, 0x6a, 0x04, 0x8f, 0x3c, 0xc5, 0x52, 0x04, 0xb6, 0xfc, 0x2f, + 0x32, 0x90, 0xf6, 0x89, 0x88, 0x9e, 0xc2, 0x4a, 0xb7, 0xdf, 0xc1, 0xa6, 0xde, 0xd0, 0xda, 0xcc, + 0x3e, 0xc5, 0xd8, 0x62, 0x0d, 0xfd, 0x68, 0x9c, 0x86, 0xae, 0xef, 0x09, 0x16, 0xd4, 0x4c, 0x99, + 0xee, 0xca, 0x53, 0xca, 0x85, 0x6e, 0x08, 0x1c, 0x3d, 0x87, 0xd5, 
0x86, 0x66, 0xe3, 0x96, 0x11, + 0x52, 0x29, 0x53, 0xc9, 0xc7, 0x63, 0x55, 0x5a, 0x70, 0x99, 0xf8, 0xab, 0x5d, 0x69, 0x84, 0x96, + 0x20, 0x0c, 0xe8, 0x44, 0xd5, 0xba, 0x46, 0xf7, 0xac, 0xa3, 0xdb, 0x67, 0x7e, 0x1f, 0xf2, 0xfe, + 0x58, 0x55, 0x7e, 0x96, 0x17, 0xd4, 0x4e, 0x65, 0xd9, 0x93, 0x00, 0x8c, 0x54, 0xd3, 0x56, 0x9b, + 0x3a, 0xcd, 0x22, 0xb9, 0xd5, 0x4c, 0x4f, 0x50, 0xcd, 0x4e, 0x51, 0x50, 0xbb, 0xd5, 0xb4, 0x03, + 0x30, 0x64, 0xc2, 0xc5, 0x13, 0xb5, 0xa3, 0xf5, 0x84, 0x37, 0x72, 0xd7, 0xf3, 0x3c, 0xbb, 0x36, + 0x5e, 0xd7, 0x7d, 0xb6, 0xab, 0xf5, 0x4a, 0x0e, 0x07, 0xb7, 0xeb, 0x4e, 0x42, 0xe0, 0xb9, 0x1d, + 0xb8, 0x10, 0xd6, 0xd5, 0xe8, 0x3d, 0x48, 0xd0, 0x64, 0x12, 0x37, 0x9a, 0x51, 0x99, 0x27, 0x86, + 0x9c, 0xdb, 0x83, 0x95, 0xf0, 0x3e, 0x3c, 0x27, 0xbf, 0x2a, 0x64, 0x83, 0x1d, 0x84, 0x3e, 0x86, + 0xb9, 0xa7, 0x7d, 0xcd, 0xd2, 0x55, 0xbd, 0x39, 0xee, 0x46, 0x46, 0x92, 0x12, 0x54, 0x9a, 0x56, + 0xee, 0xd7, 0x25, 0xc8, 0x06, 0xfb, 0xe2, 0x85, 0x38, 0xa2, 0x2a, 0x2c, 0x59, 0xb8, 0x6b, 0xe9, + 0xb6, 0xfe, 0x0c, 0xab, 0x9a, 0x6d, 0x9b, 0xfa, 0x61, 0xdf, 0xc6, 0x63, 0x26, 0xec, 0x90, 0x43, + 0x9a, 0x17, 0x94, 0xb9, 0x6f, 0x66, 0xe0, 0x42, 0x58, 0x17, 0xa2, 0xc3, 0x41, 0x31, 0x4b, 0xe7, + 0x36, 0x88, 0xf5, 0xba, 0xd6, 0x6a, 0xe1, 0x26, 0x15, 0xc4, 0xd3, 0x9a, 0x6b, 0x90, 0x32, 0x71, + 0x8b, 0x19, 0x5e, 0x53, 0xcc, 0x30, 0xc0, 0x40, 0xd4, 0xa9, 0x5b, 0x90, 0xd5, 0xfa, 0xa7, 0x7a, + 0x5b, 0xd7, 0xcc, 0x33, 0xe6, 0xf4, 0x84, 0xc7, 0x2a, 0x9f, 0x5f, 0x96, 0xbc, 0xe0, 0xc8, 0x3c, + 0xe3, 0x82, 0xe6, 0xfb, 0xb6, 0x72, 0xff, 0x49, 0x82, 0x94, 0x47, 0xde, 0xf3, 0x19, 0x93, 0x3f, + 0xaf, 0x14, 0x9b, 0x3c, 0xaf, 0x74, 0x0d, 0x80, 0x6f, 0x77, 0xdb, 0x5a, 0xcb, 0xd9, 0xf0, 0x9b, + 0x63, 0xb0, 0xba, 0x46, 0x6c, 0x9c, 0x20, 0x63, 0xd3, 0xc4, 0x4d, 0xee, 0x1c, 0x56, 0x06, 0x92, + 0xd6, 0xa5, 0x4e, 0xcf, 0x3e, 0xe3, 0x6c, 0x29, 0xe6, 0x56, 0x02, 0xe2, 0xb6, 0xd6, 0xca, 0xfd, + 0xcf, 0x18, 0x64, 0xfc, 0x7a, 0x40, 0x77, 0xc5, 0x52, 0x20, 0x3e, 0xc1, 0xb4, 0xc2, 0x17, 0x02, + 0xe6, 
0xa0, 0xb1, 0x1c, 0xbc, 0xac, 0x0e, 0x5a, 0x7f, 0xc0, 0xcc, 0x25, 0x68, 0x3c, 0xbb, 0x80, + 0x4c, 0xdc, 0xd6, 0xe8, 0x48, 0x38, 0xa2, 0x01, 0x6b, 0xb7, 0x71, 0x36, 0xe6, 0x48, 0x58, 0x14, + 0x94, 0xf7, 0x04, 0x61, 0xae, 0x01, 0xf3, 0xde, 0x8a, 0xce, 0xd9, 0xeb, 0x57, 0x7d, 0xbd, 0xc6, + 0x0c, 0xda, 0xed, 0x33, 0x67, 0xfd, 0xf0, 0xbf, 0x5e, 0x81, 0x2b, 0x34, 0x58, 0xf8, 0x1a, 0xbb, + 0xa1, 0x32, 0x89, 0x20, 0x44, 0x78, 0xff, 0x15, 0x59, 0xcd, 0x8b, 0xf0, 0xfe, 0x05, 0x22, 0x88, + 0x15, 0x87, 0x8b, 0x7f, 0xda, 0x7e, 0x0c, 0x6e, 0x89, 0x7a, 0xee, 0xa0, 0xe2, 0x82, 0xc3, 0xa3, + 0xe6, 0x89, 0x2e, 0xbe, 0x27, 0x0d, 0xc6, 0x04, 0xbe, 0xc5, 0x43, 0xd4, 0xd8, 0x1d, 0xa6, 0x91, + 0x40, 0x88, 0xc0, 0x42, 0xef, 0xc1, 0x10, 0x81, 0x87, 0xe4, 0x7f, 0x4e, 0x0a, 0x8b, 0x11, 0xb8, + 0x10, 0x6c, 0xb0, 0xdc, 0x3f, 0x8f, 0x10, 0xc1, 0xe9, 0xc6, 0x11, 0x63, 0x20, 0x64, 0xe0, 0x82, + 0xd8, 0xfe, 0x90, 0x81, 0x4b, 0xc0, 0xe6, 0xd7, 0xe2, 0x79, 0x24, 0x70, 0x27, 0x28, 0xa7, 0x6e, + 0x4f, 0x04, 0xe1, 0xd6, 0xea, 0x8d, 0x20, 0x78, 0xad, 0x33, 0xe7, 0xaf, 0xd5, 0x9d, 0xc4, 0xdc, + 0x5a, 0xdb, 0x01, 0x18, 0xfa, 0x05, 0x29, 0x24, 0xa2, 0xe0, 0x75, 0xcf, 0x9e, 0xbf, 0xe3, 0xfd, + 0x2e, 0xc2, 0xed, 0xf8, 0x93, 0x10, 0x78, 0xee, 0xc7, 0x52, 0x30, 0xc2, 0xe0, 0xc2, 0x7d, 0x04, + 0x73, 0x1d, 0xbd, 0xab, 0xb2, 0xe3, 0x13, 0xc3, 0x77, 0x7e, 0xd9, 0xd1, 0x80, 0x64, 0x47, 0xef, + 0xd2, 0x5f, 0x94, 0x54, 0x3b, 0xe5, 0xa4, 0xb1, 0xb1, 0x48, 0xb5, 0x53, 0x46, 0x5a, 0x82, 0x85, + 0xa7, 0x7d, 0xad, 0x6b, 0xeb, 0x6d, 0xac, 0xf2, 0x63, 0x09, 0xd3, 0x63, 0x1c, 0x4b, 0xc8, 0x08, + 0x22, 0xfa, 0x69, 0xe5, 0xbe, 0x3f, 0x3d, 0x18, 0xe9, 0xf0, 0x76, 0xfd, 0x43, 0x09, 0x5e, 0xa5, + 0x9c, 0x5d, 0x1f, 0xa8, 0x1e, 0xeb, 0x96, 0x6d, 0xb4, 0x4c, 0xad, 0xa3, 0x1e, 0xf6, 0x1b, 0x27, + 0xd8, 0x16, 0xfb, 0x33, 0x4f, 0x5e, 0x9e, 0xc9, 0x0f, 0x80, 0xcb, 0xa2, 0xce, 0x2d, 0x5a, 0xa5, + 0xf2, 0x0a, 0x15, 0xca, 0x71, 0xaf, 0x81, 0x62, 0x2b, 0xf7, 0x8f, 0x62, 0x70, 0x6d, 0x04, 0x0f, + 0xf4, 0x5d, 0xb8, 0x1c, 0x6c, 0x5a, 0xdb, 
0x78, 0x8e, 0x4d, 0x95, 0x6e, 0xbb, 0xf1, 0x45, 0xf6, + 0xaa, 0xbf, 0xa2, 0x1d, 0x82, 0x40, 0x77, 0xe1, 0xc2, 0xc8, 0xfb, 0xbd, 0x9e, 0x43, 0x1e, 0x0b, + 0x23, 0x3f, 0x20, 0x08, 0x8c, 0xfc, 0x1a, 0xa4, 0x98, 0xfa, 0x54, 0x4b, 0xff, 0x9a, 0xcd, 0x8a, + 0x71, 0x05, 0x18, 0xa8, 0xa6, 0x7f, 0x8d, 0xd1, 0x7d, 0x48, 0x73, 0x04, 0x5f, 0xd7, 0xbe, 0x3e, + 0xac, 0x6b, 0x9d, 0x8a, 0x94, 0x79, 0x46, 0xcb, 0x7a, 0x18, 0xdd, 0x06, 0xe4, 0xe5, 0xa5, 0xb2, + 0x74, 0x50, 0x82, 0xd6, 0x99, 0xf5, 0x60, 0x16, 0x08, 0x3c, 0xf7, 0x4d, 0xc2, 0x1b, 0xa9, 0x72, + 0x4b, 0xf8, 0x4d, 0x09, 0x6e, 0xe0, 0xa7, 0x7d, 0xfd, 0x99, 0xd6, 0xc6, 0xdd, 0x06, 0x56, 0x1b, + 0x6d, 0xcd, 0xb2, 0x22, 0x6d, 0xe1, 0xab, 0x97, 0xe1, 0x7c, 0x3c, 0x80, 0x60, 0xff, 0x5f, 0xf7, + 0x88, 0x52, 0x20, 0x92, 0x0c, 0x58, 0xc0, 0xaf, 0x48, 0x90, 0x73, 0xe9, 0x4b, 0x01, 0x74, 0x74, + 0x0f, 0xb2, 0x4e, 0x48, 0xa1, 0x4e, 0x70, 0xa4, 0x27, 0x23, 0x02, 0x04, 0xae, 0xd9, 0xf7, 0x60, + 0x65, 0x50, 0x2b, 0xb4, 0x47, 0x99, 0x01, 0x5c, 0x08, 0x0a, 0x4a, 0xfa, 0x36, 0xf7, 0x4b, 0x71, + 0xb8, 0x14, 0xd9, 0x38, 0x74, 0x1f, 0xe4, 0x70, 0x9e, 0x21, 0xf6, 0xf9, 0x4a, 0x18, 0x7f, 0x8f, + 0x95, 0x46, 0xf3, 0x1a, 0x34, 0xd6, 0x50, 0x5e, 0x93, 0x98, 0xec, 0x9f, 0x91, 0xc2, 0x6d, 0x56, + 0x7d, 0xc9, 0xd6, 0x10, 0xec, 0xcd, 0x17, 0xb2, 0xf6, 0xbf, 0x39, 0xeb, 0x5d, 0x45, 0x71, 0x6b, + 0xff, 0x5d, 0x09, 0xde, 0x72, 0x57, 0x42, 0xe3, 0x7a, 0xc0, 0xaf, 0x5e, 0xc6, 0xe4, 0xe7, 0x01, + 0x04, 0xad, 0xfe, 0x0d, 0x47, 0xa4, 0x87, 0xc3, 0xdd, 0xdf, 0x6f, 0xc5, 0x20, 0xe7, 0xb2, 0xf9, + 0xe3, 0x65, 0xfc, 0x28, 0x0f, 0x57, 0xbb, 0xfd, 0x8e, 0xda, 0xd4, 0x2d, 0x5b, 0xef, 0x36, 0x6c, + 0x35, 0xa0, 0x67, 0x8b, 0x1b, 0x56, 0xae, 0xdb, 0xef, 0x14, 0x39, 0x4e, 0xcd, 0xd7, 0x6e, 0x0b, + 0x7d, 0x0e, 0x17, 0x6c, 0xa3, 0x37, 0x48, 0x39, 0x91, 0x8b, 0x44, 0xb6, 0xd1, 0x0b, 0x30, 0xce, + 0xfd, 0x20, 0x0e, 0x97, 0x22, 0xf5, 0x8f, 0xf6, 0xe1, 0xf5, 0x68, 0xa3, 0x18, 0x1c, 0x9b, 0xaf, + 0x46, 0x74, 0x97, 0x67, 0x78, 0x0e, 0xe5, 0x38, 0x38, 0x42, 0xa3, 0x38, 0xfe, 
0x91, 0x0d, 0xd2, + 0x21, 0xc6, 0xfb, 0x52, 0x07, 0xe9, 0x5f, 0x4b, 0x04, 0xf3, 0x08, 0x7c, 0xa0, 0xfe, 0x2d, 0x09, + 0x72, 0x03, 0x51, 0xa1, 0x33, 0x3e, 0xb9, 0x55, 0x1f, 0xbd, 0xac, 0xc0, 0x30, 0x00, 0x0c, 0x8e, + 0xcf, 0x8b, 0x27, 0xe1, 0xc5, 0xb9, 0xbf, 0x22, 0xc1, 0x65, 0x3f, 0x29, 0x5f, 0x0c, 0x72, 0x65, + 0xbc, 0xac, 0x01, 0xb9, 0x01, 0x4b, 0x6e, 0xf2, 0xdf, 0x59, 0x17, 0x70, 0xe3, 0x41, 0x4e, 0x91, + 0xe3, 0x48, 0x73, 0xff, 0x3c, 0x06, 0x57, 0x87, 0xb6, 0x09, 0xdd, 0x80, 0x34, 0x89, 0x6c, 0x5d, + 0x66, 0xcc, 0xb6, 0xe7, 0x3b, 0x7a, 0xd7, 0x61, 0x43, 0x91, 0xb4, 0xd3, 0x81, 0x1a, 0xe7, 0x3b, + 0xda, 0xa9, 0x8b, 0x14, 0xb0, 0xcc, 0xc4, 0x80, 0x65, 0xfe, 0xd2, 0x80, 0x65, 0xb2, 0xf3, 0xf7, + 0xcd, 0x6f, 0xa9, 0xfb, 0x7c, 0x7d, 0x30, 0x96, 0x79, 0xce, 0x86, 0x9b, 0xe7, 0x56, 0x52, 0xec, + 0x5f, 0xc9, 0x8f, 0x21, 0xe3, 0x1f, 0x9a, 0x68, 0x53, 0x9c, 0xaa, 0x1e, 0x67, 0x59, 0xc0, 0x4f, + 0x5c, 0x87, 0xef, 0xd8, 0xfd, 0x5a, 0x1c, 0x12, 0x2c, 0xf0, 0x7f, 0x1d, 0xd2, 0x7a, 0xd7, 0xc6, + 0x2d, 0x6c, 0x7a, 0x96, 0x1c, 0xf1, 0xf2, 0x94, 0x32, 0xcf, 0xc1, 0x0c, 0xed, 0x55, 0x48, 0x1d, + 0xb5, 0x0d, 0xcd, 0xf6, 0x2c, 0x2e, 0xa4, 0xf2, 0x94, 0x02, 0x14, 0xc8, 0x50, 0x6e, 0xc0, 0xbc, + 0x65, 0x9b, 0x7a, 0xb7, 0xa5, 0xfa, 0x8f, 0x7e, 0xa7, 0x18, 0xd4, 0xa9, 0xee, 0xd0, 0x30, 0xda, + 0x58, 0x13, 0x2b, 0x9c, 0x69, 0x7e, 0x04, 0x69, 0x9e, 0x83, 0x9d, 0xe5, 0x88, 0x73, 0xd0, 0x91, + 0x23, 0x26, 0x46, 0x1d, 0x77, 0x2c, 0x4f, 0x29, 0x19, 0x87, 0x88, 0xb1, 0xf9, 0x0e, 0x00, 0x81, + 0x70, 0x0e, 0x33, 0xfe, 0xdc, 0x93, 0x7d, 0xd6, 0xc3, 0x94, 0xba, 0x7a, 0x54, 0xd4, 0xce, 0xca, + 0x53, 0xca, 0x1c, 0xc1, 0x65, 0x84, 0x9b, 0x00, 0x4d, 0xcd, 0x16, 0x84, 0x6c, 0x4d, 0xb8, 0xe8, + 0x23, 0x2c, 0x6a, 0x36, 0x26, 0x34, 0x04, 0x8d, 0xd1, 0x14, 0x60, 0xb1, 0xa9, 0x9d, 0xa9, 0xc6, + 0x91, 0xfa, 0x1c, 0xe3, 0x13, 0x4e, 0x9a, 0xa4, 0xfb, 0xcc, 0x2b, 0x01, 0xd2, 0xb3, 0xea, 0xd1, + 0xe7, 0x18, 0x9f, 0x10, 0x89, 0x9b, 0xe2, 0x83, 0x32, 0x71, 0xf2, 0x2e, 0x3f, 0x03, 0x73, 0xce, + 0x29, 0x61, 0xf4, 
0x29, 0x3d, 0xb8, 0xce, 0x8f, 0x25, 0x0f, 0xcf, 0xd0, 0x15, 0xf9, 0x79, 0xe4, + 0xf2, 0x94, 0x92, 0x6c, 0xf2, 0xdf, 0x5b, 0x19, 0x98, 0xef, 0x69, 0xa6, 0x85, 0x9b, 0xec, 0x8a, + 0x8b, 0xfc, 0x17, 0x63, 0x90, 0x14, 0x88, 0xe8, 0x75, 0x7a, 0x87, 0x40, 0xd8, 0xd4, 0x60, 0x23, + 0xe9, 0xb5, 0x02, 0x8c, 0x3e, 0x80, 0x94, 0xa7, 0x75, 0xfc, 0xea, 0x4e, 0x44, 0xbb, 0x88, 0x56, + 0xf8, 0x4f, 0xb4, 0x06, 0xd3, 0x54, 0xec, 0xf8, 0x30, 0xe5, 0x2b, 0x14, 0x07, 0x95, 0x80, 0x76, + 0x81, 0xfa, 0xb5, 0xd1, 0x15, 0x37, 0x04, 0x6e, 0x8d, 0x68, 0x27, 0xe5, 0xf1, 0xd8, 0xe8, 0x62, + 0x25, 0x69, 0xf3, 0x5f, 0xb9, 0x77, 0x20, 0x29, 0xa0, 0xe8, 0x75, 0xc8, 0xb0, 0xc3, 0xa9, 0x6a, + 0x47, 0xef, 0xf6, 0xc5, 0x8e, 0x67, 0x42, 0x49, 0x33, 0xe8, 0x2e, 0x03, 0xca, 0xff, 0x5b, 0x82, + 0x6c, 0xf0, 0xcc, 0x0d, 0x6a, 0xc3, 0x25, 0x77, 0x47, 0xd3, 0xf6, 0x9d, 0xfd, 0xb0, 0xb8, 0xba, + 0xd6, 0x47, 0x24, 0x4a, 0xfd, 0x27, 0x46, 0xac, 0xf2, 0x94, 0x72, 0x51, 0x0f, 0x2f, 0x42, 0x18, + 0x56, 0xf8, 0xb9, 0xd8, 0x60, 0x55, 0xac, 0xc7, 0x6f, 0x0f, 0x3d, 0x23, 0x3b, 0x58, 0xd1, 0xb2, + 0x19, 0x56, 0xb0, 0x95, 0x85, 0x8c, 0x9f, 0xbf, 0xfc, 0x93, 0x59, 0xb8, 0xb8, 0x6f, 0xea, 0x1d, + 0x1a, 0x0c, 0xf8, 0xd1, 0x91, 0x02, 0x19, 0x13, 0xf7, 0xda, 0x1a, 0x09, 0xc9, 0xbc, 0x9b, 0x65, + 0x6f, 0x46, 0x0a, 0x43, 0x91, 0xb9, 0x3f, 0xe3, 0x3b, 0x2c, 0x69, 0xce, 0x82, 0xab, 0xf5, 0x3e, + 0xf0, 0xa3, 0x79, 0xfe, 0xad, 0xb0, 0x1b, 0x43, 0xcf, 0x64, 0x3a, 0xcc, 0xe6, 0x4d, 0xcf, 0x37, + 0xfa, 0x53, 0xb0, 0xdc, 0x38, 0xd6, 0xe8, 0xf9, 0x3d, 0x93, 0xde, 0xa4, 0xf3, 0xef, 0x75, 0x45, + 0xed, 0x92, 0x16, 0x04, 0xcd, 0xae, 0x66, 0x9d, 0x38, 0xac, 0x97, 0x1a, 0x83, 0x60, 0x64, 0xc3, + 0xd5, 0x86, 0x79, 0xd6, 0xb3, 0x0d, 0x55, 0x28, 0xe2, 0xe8, 0xe8, 0x54, 0x3d, 0xea, 0x61, 0xff, + 0x76, 0x57, 0xd4, 0x0d, 0x9e, 0x02, 0xa5, 0xe5, 0x6a, 0xb9, 0x77, 0x74, 0x7a, 0xaf, 0xe7, 0xea, + 0xe5, 0x52, 0x23, 0xaa, 0x10, 0xf5, 0xe0, 0xf2, 0x91, 0x7e, 0x8a, 0x9b, 0x6c, 0x7d, 0xc5, 0x26, + 0x09, 0xe2, 0x59, 0x7d, 0xdb, 0x5e, 0x1b, 0x91, 0x99, 
0xde, 0x53, 0xdc, 0x24, 0xd3, 0xe0, 0x96, + 0xa0, 0x73, 0xaa, 0x5c, 0x3d, 0x8a, 0x28, 0x43, 0x35, 0xc8, 0x0e, 0x54, 0x33, 0x33, 0xfc, 0x08, + 0xeb, 0x00, 0xf7, 0x85, 0xc3, 0x00, 0x53, 0x1b, 0xae, 0x0a, 0xad, 0x3d, 0xd7, 0xed, 0x63, 0xf7, + 0x96, 0x97, 0xa8, 0x61, 0x76, 0xa8, 0xf2, 0xb8, 0x66, 0x3e, 0xd7, 0xed, 0x63, 0x31, 0xa0, 0x5c, + 0xe5, 0x99, 0x51, 0x85, 0xe8, 0x01, 0x64, 0xa9, 0x1b, 0xe9, 0x69, 0xa6, 0x63, 0x63, 0xc9, 0xa1, + 0x37, 0x05, 0x88, 0xbb, 0xd8, 0xd7, 0x4c, 0xd7, 0xca, 0xe8, 0x44, 0xe2, 0x42, 0xd0, 0xe7, 0x80, + 0xb8, 0x15, 0x1c, 0x6b, 0xd6, 0xb1, 0x60, 0x3a, 0x37, 0xf4, 0x50, 0x06, 0xeb, 0xfa, 0xb2, 0x66, + 0x1d, 0xbb, 0x7b, 0x9b, 0x8d, 0x00, 0x8c, 0x1e, 0x1d, 0x25, 0xae, 0xdd, 0x3a, 0xd6, 0x8f, 0x1c, + 0x61, 0x53, 0x43, 0xf5, 0x4e, 0x5c, 0x5f, 0x8d, 0xa0, 0xbb, 0x7a, 0x6f, 0xfa, 0x41, 0x21, 0x83, + 0xfc, 0x1b, 0x09, 0x32, 0xfe, 0x56, 0xa2, 0x87, 0xb0, 0x40, 0x35, 0x64, 0x1b, 0x2a, 0x3f, 0x03, + 0xcb, 0x6f, 0xa3, 0xad, 0x8f, 0xa5, 0x25, 0xe7, 0x53, 0x49, 0x13, 0x36, 0x75, 0xa3, 0xc4, 0x98, + 0xc8, 0xdf, 0x93, 0x98, 0xff, 0x25, 0x65, 0xe8, 0x12, 0x2c, 0xd7, 0x2b, 0xbb, 0x25, 0x75, 0x3f, + 0xaf, 0xd4, 0x03, 0xa7, 0xa0, 0x92, 0x30, 0xfd, 0xa8, 0x94, 0x57, 0xb2, 0x12, 0x9a, 0x83, 0xc4, + 0x6e, 0x75, 0xaf, 0x5e, 0xce, 0xc6, 0x50, 0x16, 0xe6, 0x8b, 0xf9, 0x47, 0x6a, 0xf5, 0x9e, 0xca, + 0x20, 0x71, 0xb4, 0x00, 0x29, 0x0e, 0xf9, 0xbc, 0x54, 0xfa, 0x2c, 0x3b, 0x4d, 0x50, 0xc8, 0x2f, + 0x02, 0xa1, 0xf4, 0x09, 0x82, 0x52, 0xae, 0x1e, 0x28, 0x04, 0x52, 0xcc, 0x3f, 0xca, 0xce, 0xc8, + 0x35, 0xc8, 0x06, 0xb5, 0x8f, 0x7e, 0x0a, 0x80, 0x77, 0xe1, 0xe8, 0x7b, 0x07, 0x8c, 0x98, 0xde, + 0x3b, 0x68, 0x88, 0x9f, 0x72, 0x15, 0xd0, 0xa0, 0x7b, 0x43, 0x1f, 0xc1, 0x5c, 0x17, 0x3f, 0x9f, + 0x24, 0x5d, 0xdb, 0xc5, 0xcf, 0xe9, 0x2f, 0xf9, 0x32, 0x5c, 0x8a, 0xb4, 0x70, 0x39, 0x03, 0xf3, + 0x5e, 0xcf, 0x27, 0xff, 0x24, 0x06, 0x69, 0xe2, 0xb6, 0xac, 0xba, 0x51, 0x69, 0x75, 0x0d, 0x13, + 0xa3, 0x75, 0x40, 0x8e, 0xc3, 0xb2, 0x48, 0x2f, 0x5a, 0x27, 0x3a, 0x3b, 0xc4, 0x3f, 0x47, 
0x4d, + 0xcd, 0x29, 0xab, 0x1b, 0xb5, 0x13, 0xbd, 0x87, 0xce, 0xe0, 0x72, 0xc3, 0xe8, 0x74, 0x8c, 0xae, + 0xea, 0x27, 0xd3, 0x29, 0x3b, 0x3e, 0xa3, 0x7f, 0x38, 0xc4, 0x63, 0x3a, 0x55, 0xaf, 0x17, 0x28, + 0x1f, 0x1f, 0x8c, 0x38, 0x97, 0x86, 0x03, 0x16, 0x15, 0xb3, 0x32, 0xf9, 0x87, 0x12, 0x2c, 0x85, + 0xd0, 0xa0, 0x9b, 0x20, 0x17, 0xaa, 0xbb, 0xbb, 0xd5, 0x3d, 0xb5, 0x50, 0xce, 0x2b, 0x35, 0xb5, + 0x5e, 0x55, 0x2b, 0xdb, 0x7b, 0x55, 0x25, 0x78, 0x55, 0x31, 0x05, 0xb3, 0x7b, 0x07, 0xbb, 0x25, + 0xa5, 0x52, 0xc8, 0x4a, 0xe8, 0x02, 0x64, 0xf3, 0x3b, 0xfb, 0xe5, 0xbc, 0x7a, 0xb0, 0xbf, 0x5f, + 0x52, 0xd4, 0x42, 0xbe, 0x56, 0xca, 0xc6, 0x5c, 0xe8, 0x4e, 0xf5, 0x73, 0x01, 0xa5, 0xc6, 0xb3, + 0x7f, 0xb0, 0x57, 0xa8, 0x1f, 0xe4, 0xeb, 0x95, 0xea, 0x5e, 0x76, 0x1a, 0x65, 0x00, 0x3e, 0x2f, + 0x57, 0xea, 0xa5, 0xda, 0x7e, 0xbe, 0x50, 0xca, 0x26, 0xb6, 0xe6, 0x01, 0x5c, 0x6d, 0xc8, 0xff, + 0x95, 0xc8, 0x19, 0x32, 0x09, 0xbc, 0x05, 0x8b, 0x64, 0x72, 0xa1, 0xae, 0x51, 0x14, 0xf3, 0xb3, + 0x3d, 0x59, 0x5e, 0xe0, 0x90, 0xa1, 0xd7, 0x20, 0xd3, 0xed, 0x77, 0x0e, 0xb1, 0x49, 0x94, 0x4b, + 0x4a, 0xf9, 0x2d, 0x8a, 0x79, 0x06, 0xad, 0x1b, 0x84, 0x31, 0x59, 0xe7, 0x98, 0x98, 0xac, 0x6f, + 0xb1, 0x6a, 0x98, 0x4d, 0xcc, 0x4e, 0xd8, 0x27, 0xc9, 0xf4, 0x46, 0x81, 0x55, 0x02, 0x43, 0x0f, + 0xe1, 0x42, 0x68, 0x5f, 0x4d, 0x0f, 0x3d, 0x58, 0xe5, 0xd3, 0xb1, 0x82, 0x1a, 0x83, 0xfd, 0xf1, + 0x3b, 0x12, 0xac, 0x46, 0xcd, 0x12, 0xe8, 0xbb, 0x90, 0x0a, 0x26, 0x20, 0x46, 0xd9, 0x34, 0xb4, + 0xbd, 0xc9, 0xec, 0x54, 0x30, 0xdb, 0x30, 0x92, 0xbc, 0x3f, 0x34, 0xe9, 0x20, 0x79, 0x97, 0x76, + 0xf2, 0x2f, 0xc7, 0x60, 0x21, 0x28, 0xf2, 0x36, 0xcc, 0x8a, 0xf4, 0x19, 0x5b, 0xeb, 0xde, 0x19, + 0x6f, 0xce, 0xe2, 0xdf, 0x8a, 0xa0, 0xa6, 0x87, 0xe5, 0xf8, 0x6a, 0x75, 0x1d, 0xe2, 0x1d, 0xbd, + 0x3b, 0x56, 0xf3, 0x09, 0x22, 0xc5, 0xd7, 0x4e, 0xc7, 0x6a, 0x2f, 0x41, 0x44, 0x15, 0x58, 0xe4, + 0x53, 0x18, 0xbd, 0x30, 0xe6, 0xae, 0x99, 0x46, 0x51, 0x67, 0x3d, 0x64, 0xcc, 0x91, 0xfc, 0xde, + 0x34, 0x5c, 0x8a, 0x0c, 0x34, 
0x5e, 0xd8, 0xf1, 0xa1, 0x0f, 0x61, 0x96, 0xde, 0xe3, 0xe3, 0x77, + 0x26, 0xc6, 0xb8, 0x6f, 0xc5, 0xd1, 0x91, 0x05, 0x0b, 0xdc, 0xe5, 0x68, 0xed, 0xde, 0xb1, 0x76, + 0x88, 0xd9, 0x9e, 0x66, 0x26, 0x72, 0x7f, 0x2d, 0xb2, 0x15, 0xeb, 0xf7, 0x8e, 0x4e, 0x99, 0x07, + 0xd9, 0xa3, 0xdb, 0xe1, 0x79, 0xce, 0x8f, 0xcc, 0xd5, 0xac, 0x0a, 0x01, 0x41, 0x6f, 0x02, 0xbf, + 0x3e, 0xef, 0x56, 0x9a, 0xe0, 0x4e, 0x31, 0xc3, 0x0a, 0x1c, 0xd4, 0x15, 0x48, 0x98, 0x5a, 0x53, + 0x3f, 0xa5, 0x91, 0x4e, 0xa2, 0x3c, 0xa5, 0xb0, 0x4f, 0x7a, 0x78, 0xa5, 0x6f, 0x9a, 0x46, 0x4b, + 0xb3, 0x3d, 0x37, 0xfe, 0x79, 0x10, 0x31, 0xf2, 0xb8, 0xec, 0xa2, 0x43, 0x2b, 0x40, 0xf2, 0x5f, + 0x96, 0xe0, 0x62, 0x44, 0x0b, 0xd0, 0x1a, 0xdc, 0xbc, 0x77, 0xef, 0x0b, 0x95, 0x3b, 0xc2, 0xbd, + 0x7c, 0xbd, 0xf2, 0xb0, 0xa4, 0x52, 0x5f, 0xb6, 0x55, 0xaa, 0x0f, 0x73, 0x84, 0x64, 0xda, 0x2b, + 0x7d, 0x91, 0x2f, 0x96, 0x0a, 0x95, 0xdd, 0xfc, 0x4e, 0x36, 0x86, 0xae, 0xc0, 0xaa, 0xeb, 0x13, + 0x19, 0x0b, 0x55, 0xa0, 0xc7, 0xd1, 0x22, 0xa4, 0xfd, 0xa0, 0xe9, 0x2d, 0x80, 0xa4, 0xd0, 0x91, + 0xfc, 0x7f, 0x24, 0x98, 0x73, 0xba, 0x1f, 0x55, 0x60, 0x8e, 0x46, 0x10, 0xba, 0x38, 0xb2, 0x1e, + 0x1d, 0xf3, 0xd7, 0x05, 0x9e, 0x43, 0x4d, 0xd7, 0xd2, 0x02, 0x4a, 0x58, 0xf5, 0xbb, 0xcf, 0x4d, + 0xad, 0xd7, 0xc3, 0xc2, 0x1d, 0x44, 0xb1, 0x3a, 0x10, 0x78, 0x3e, 0x56, 0x0e, 0x35, 0xda, 0x85, + 0xd4, 0x49, 0xc7, 0x52, 0x05, 0xb3, 0xe1, 0x41, 0xfe, 0x67, 0x1d, 0xeb, 0xf3, 0x41, 0x6e, 0x70, + 0xe2, 0x80, 0xb7, 0x92, 0x30, 0xc3, 0x4e, 0x14, 0xc8, 0xb7, 0x00, 0x0d, 0x36, 0x23, 0xec, 0xb0, + 0xa9, 0x7c, 0x13, 0xd0, 0xa0, 0x94, 0x28, 0x0b, 0x71, 0x31, 0xb8, 0xe6, 0x15, 0xf2, 0x53, 0xfe, + 0x0a, 0x96, 0x42, 0x04, 0x20, 0xee, 0x8d, 0x13, 0xab, 0x2e, 0x01, 0x70, 0x10, 0x41, 0xb8, 0x09, + 0x0b, 0xee, 0x68, 0xf5, 0x9e, 0x67, 0x4d, 0x3b, 0x03, 0x92, 0x9e, 0xbf, 0xff, 0x43, 0x09, 0x16, + 0x02, 0x91, 0x20, 0xba, 0x05, 0x59, 0x8f, 0xeb, 0x55, 0x9b, 0xda, 0x99, 0x58, 0xee, 0x66, 0x5c, + 0x0f, 0x5b, 0xd4, 0xce, 0x2c, 0x82, 0xe9, 0xf1, 0xf1, 0x0c, 0x93, 
0xcd, 0x52, 0x19, 0xd7, 0x95, + 0x53, 0x4c, 0xcf, 0xe0, 0x8f, 0x4f, 0x36, 0xf8, 0xf3, 0x3e, 0xbf, 0x33, 0x3d, 0x9e, 0xdf, 0xa1, + 0x27, 0x87, 0xc4, 0x07, 0xe9, 0xa0, 0x0e, 0xb6, 0x8f, 0x8d, 0xa6, 0xfc, 0xa3, 0x18, 0x5c, 0x8c, + 0x58, 0x54, 0x23, 0x03, 0x16, 0x06, 0x57, 0xe7, 0xc3, 0x8e, 0x81, 0x45, 0x30, 0x8a, 0x80, 0x2b, + 0x41, 0xee, 0xb9, 0x7f, 0x26, 0xc1, 0x4a, 0x38, 0xee, 0x0b, 0x3f, 0x92, 0xa2, 0xc3, 0x6a, 0x4f, + 0xac, 0xc5, 0x03, 0x89, 0x00, 0x3e, 0x76, 0xd6, 0xa3, 0x8f, 0xd3, 0x84, 0x2d, 0xe1, 0x95, 0x8b, + 0xbd, 0xf0, 0x02, 0xf9, 0x7b, 0x71, 0x58, 0xa2, 0x9d, 0x16, 0x68, 0xc2, 0x07, 0x30, 0x43, 0x8f, + 0x08, 0x8d, 0x7b, 0xe6, 0x8f, 0x63, 0xa3, 0x22, 0xcc, 0x35, 0x8c, 0x6e, 0x53, 0xf7, 0xdc, 0xab, + 0xbd, 0x39, 0x34, 0x67, 0x51, 0x10, 0xd8, 0x8a, 0x4b, 0x88, 0x4e, 0x86, 0x28, 0x60, 0xfa, 0x3c, + 0x0a, 0x28, 0x4f, 0x45, 0xaa, 0x60, 0x78, 0x86, 0x27, 0xf1, 0x92, 0x33, 0x3c, 0x21, 0xab, 0xb2, + 0x1f, 0x4b, 0xb0, 0x1c, 0x9a, 0xbf, 0x41, 0x2a, 0x2c, 0xb3, 0x4b, 0xce, 0xe1, 0x96, 0xbd, 0x36, + 0xac, 0x4f, 0x02, 0x06, 0x70, 0xe1, 0x68, 0x10, 0x68, 0xa1, 0x47, 0xb0, 0xc4, 0xd3, 0x4d, 0x56, + 0xbf, 0xd7, 0x33, 0xb1, 0x65, 0xf1, 0x5c, 0x53, 0x7c, 0x48, 0xd6, 0x8d, 0xc9, 0x5a, 0x73, 0x09, + 0x14, 0x64, 0x06, 0x41, 0x96, 0xfc, 0x08, 0x16, 0x07, 0x10, 0xfd, 0xd6, 0x21, 0x9d, 0xd3, 0x3a, + 0xe4, 0x5f, 0x4f, 0xc0, 0x42, 0xa0, 0x18, 0xd5, 0x21, 0x85, 0x4f, 0xdd, 0x16, 0x0c, 0x7f, 0x12, + 0x28, 0x40, 0xbc, 0x5e, 0x72, 0x29, 0x15, 0x2f, 0x9b, 0xdc, 0xef, 0x92, 0xe9, 0xd0, 0xa9, 0xe3, + 0x7c, 0x67, 0xec, 0x4a, 0x90, 0x34, 0x7a, 0xd8, 0xd4, 0x6c, 0x7e, 0x81, 0x36, 0x33, 0x24, 0x6f, + 0xd6, 0xa6, 0xfd, 0xa2, 0xb5, 0xab, 0x9c, 0x40, 0x71, 0x48, 0xdd, 0xb4, 0xff, 0xf4, 0xd8, 0x69, + 0xff, 0xdc, 0x57, 0x00, 0x8e, 0xf4, 0x16, 0xda, 0x07, 0x70, 0x74, 0x28, 0x4c, 0xe8, 0xed, 0x31, + 0x35, 0xe4, 0xf6, 0x83, 0x87, 0x47, 0xee, 0x87, 0x31, 0x48, 0x79, 0x74, 0x87, 0x3a, 0x64, 0x42, + 0x69, 0xd1, 0x23, 0x6c, 0x4e, 0x93, 0x59, 0x36, 0x61, 0x6b, 0xf2, 0x9e, 0x58, 0xdf, 0x61, 0xac, + 0x1c, 
0x5d, 0x2c, 0xb4, 0xfd, 0x00, 0x54, 0xf3, 0x35, 0x88, 0x75, 0xf9, 0x3b, 0x93, 0x36, 0x88, + 0x0c, 0x56, 0x0f, 0x1b, 0xf9, 0x13, 0x58, 0x08, 0x54, 0x8c, 0xae, 0xc3, 0x95, 0x9d, 0xea, 0x76, + 0xa5, 0x90, 0xdf, 0x51, 0xab, 0xfb, 0x25, 0x25, 0x5f, 0xaf, 0x2a, 0x81, 0x88, 0x6c, 0x16, 0xe2, + 0xf9, 0xbd, 0x62, 0x56, 0x72, 0x32, 0xf7, 0x7f, 0x47, 0x82, 0x95, 0xf0, 0x8b, 0x82, 0x64, 0x19, + 0xe9, 0x0c, 0xe7, 0xc0, 0x95, 0x9a, 0xac, 0xa7, 0x80, 0xdd, 0xa7, 0x69, 0xc1, 0xaa, 0x7f, 0xec, + 0xab, 0x56, 0xbf, 0xd3, 0xd1, 0x4c, 0xdd, 0x39, 0x59, 0x7c, 0x7b, 0xac, 0x6b, 0x8a, 0x35, 0x4a, + 0x75, 0xa6, 0x5c, 0xb4, 0x43, 0xc0, 0x3a, 0xb6, 0xe4, 0x1f, 0xce, 0xc0, 0x72, 0x28, 0xc9, 0x0b, + 0xde, 0x15, 0x73, 0xc6, 0x4c, 0x6c, 0x92, 0x31, 0xf3, 0x30, 0xe8, 0x24, 0x79, 0xef, 0x4e, 0x3a, + 0xed, 0x05, 0xb8, 0x44, 0x3b, 0xd4, 0xc4, 0x4b, 0x72, 0xa8, 0x0f, 0x9c, 0xe7, 0x49, 0x84, 0x43, + 0xe5, 0xf9, 0xd3, 0xf1, 0x9d, 0x69, 0xc6, 0xef, 0x4c, 0x51, 0x1d, 0x66, 0xd9, 0x5e, 0xa0, 0xd8, + 0x58, 0xbf, 0x3b, 0x49, 0x8f, 0xaf, 0x8b, 0x9e, 0x67, 0x17, 0xda, 0x04, 0xab, 0x70, 0x2b, 0x9c, + 0x0d, 0xb7, 0xc2, 0xdc, 0xaf, 0x48, 0x90, 0xf6, 0xf1, 0x71, 0x37, 0x14, 0x25, 0xcf, 0x86, 0x22, + 0x7a, 0x04, 0xd3, 0xce, 0xc9, 0xf8, 0x4c, 0x64, 0xe0, 0x15, 0x2e, 0x67, 0x40, 0xbd, 0xb4, 0x9a, + 0x82, 0xd1, 0xc4, 0x0a, 0x65, 0x89, 0x56, 0x61, 0xb6, 0xc9, 0x76, 0x62, 0xd9, 0x96, 0xa2, 0x22, + 0x3e, 0xe5, 0xaf, 0x60, 0x35, 0x8a, 0x96, 0xac, 0xaa, 0xea, 0x4a, 0x7e, 0xaf, 0x76, 0xaf, 0xaa, + 0xec, 0xd2, 0xe4, 0x8f, 0xaa, 0x94, 0x6a, 0x07, 0x3b, 0x75, 0xb5, 0x50, 0x2d, 0x86, 0xa4, 0x97, + 0x6a, 0x07, 0x85, 0x42, 0xa9, 0x56, 0x63, 0xc9, 0xc8, 0x92, 0xa2, 0x54, 0x95, 0x6c, 0x4c, 0x36, + 0x20, 0x59, 0x6b, 0x1c, 0xe3, 0x66, 0xbf, 0x8d, 0xd1, 0x23, 0xc8, 0x99, 0xb8, 0xd1, 0x37, 0x4d, + 0x7a, 0x3e, 0xa5, 0x87, 0x4d, 0xdd, 0x68, 0xaa, 0xe2, 0xf9, 0x3f, 0x3e, 0x38, 0x2e, 0x0d, 0x6c, + 0x4e, 0x16, 0x39, 0x42, 0x79, 0x4a, 0x59, 0x75, 0xc9, 0xf7, 0x29, 0xb5, 0x28, 0x23, 0x51, 0x2e, + 0xbb, 0x17, 0x29, 0xff, 0xbd, 0x18, 0x2c, 
0x04, 0xef, 0x01, 0x9e, 0xf3, 0xc6, 0xdb, 0x75, 0x48, + 0x35, 0xdd, 0x3b, 0x60, 0x5c, 0x73, 0x5e, 0x50, 0xf0, 0xb9, 0xa3, 0xe9, 0x89, 0x9e, 0x3b, 0xfa, + 0x18, 0x52, 0xfd, 0x9e, 0xbb, 0x29, 0x99, 0x18, 0x4d, 0xcc, 0xd0, 0x29, 0xf1, 0xe0, 0xa5, 0xe7, + 0x99, 0x73, 0x5f, 0x7a, 0x96, 0xff, 0x49, 0x0c, 0x50, 0x71, 0xe0, 0xbe, 0xf8, 0x9f, 0x44, 0xb5, + 0x85, 0x3e, 0x33, 0x31, 0xf3, 0x82, 0xcf, 0x4c, 0xc8, 0x4f, 0x21, 0x51, 0x32, 0x4d, 0xc3, 0x44, + 0xb7, 0xdd, 0x71, 0xc6, 0xcc, 0x19, 0x09, 0xa6, 0x66, 0xaf, 0xb1, 0x5e, 0xa3, 0xcf, 0x4d, 0x3a, + 0x63, 0x0f, 0xdd, 0x65, 0x5b, 0xeb, 0x54, 0x4a, 0x11, 0x36, 0x0e, 0x6d, 0x88, 0x8b, 0x2d, 0xff, + 0x28, 0x01, 0x70, 0xdf, 0x38, 0xac, 0x9b, 0x7a, 0xab, 0x85, 0xcd, 0x6f, 0xaf, 0xab, 0xee, 0x43, + 0x4a, 0xd8, 0xd9, 0x13, 0xe3, 0x90, 0x77, 0xd5, 0xb8, 0x77, 0x68, 0x49, 0x70, 0xa0, 0x3b, 0x30, + 0x12, 0xcb, 0xd9, 0x4c, 0x5e, 0x31, 0x65, 0x44, 0xc5, 0x72, 0x6e, 0xcb, 0xd6, 0xf9, 0x5f, 0xc5, + 0x21, 0x45, 0xef, 0xc1, 0x0c, 0x26, 0xda, 0x16, 0x07, 0x52, 0xa2, 0x82, 0x39, 0xda, 0x25, 0x0a, + 0xc7, 0x0d, 0xda, 0xdc, 0xec, 0x8b, 0xd8, 0x5c, 0x72, 0x22, 0x9b, 0xfb, 0x14, 0xd2, 0x6d, 0xcd, + 0xb2, 0x55, 0xb3, 0xdf, 0x65, 0xe4, 0x73, 0x23, 0xc9, 0x53, 0x84, 0x40, 0xe9, 0x77, 0x29, 0xfd, + 0x4f, 0xc3, 0x0c, 0x7b, 0xa8, 0x74, 0x15, 0xe8, 0xcc, 0x70, 0x6b, 0xb4, 0xd2, 0xb8, 0xa5, 0x71, + 0xba, 0x5c, 0x0d, 0x66, 0x85, 0xa1, 0x7c, 0x17, 0x92, 0x16, 0xf7, 0xc7, 0x23, 0xc2, 0x11, 0xe1, + 0xb6, 0xcb, 0x53, 0x8a, 0x43, 0xb2, 0x35, 0x07, 0xb3, 0xbc, 0x1f, 0xe4, 0x32, 0xcc, 0xb0, 0x6a, + 0xd0, 0x0a, 0xa0, 0x5a, 0x3d, 0x5f, 0x3f, 0xa8, 0x0d, 0xce, 0x09, 0xe5, 0x52, 0x7e, 0xa7, 0x5e, + 0x7e, 0x94, 0x95, 0x10, 0xc0, 0xcc, 0x7e, 0xfe, 0xa0, 0x56, 0x2a, 0xb2, 0x77, 0x11, 0x0b, 0xf9, + 0xbd, 0x42, 0x69, 0x67, 0xa7, 0x54, 0xcc, 0xc6, 0xb7, 0x12, 0x10, 0x7f, 0x62, 0x1c, 0xca, 0xbf, + 0x13, 0x83, 0x19, 0x76, 0x07, 0x15, 0x3d, 0x80, 0xb4, 0xa5, 0x3d, 0xc3, 0xaa, 0xe7, 0x59, 0xbe, + 0x61, 0x69, 0x2a, 0x46, 0xb5, 0x5e, 0xd3, 0x9e, 0x61, 0xf1, 0x62, 0x64, 0x79, 
0x4a, 0x99, 0xb7, + 0x3c, 0xdf, 0xa8, 0x0c, 0xb3, 0xbd, 0xfe, 0xa1, 0x6a, 0xf5, 0x0f, 0x47, 0xbc, 0x50, 0xc1, 0x99, + 0xed, 0xf7, 0x0f, 0xdb, 0xba, 0x75, 0x5c, 0x37, 0xf6, 0xfb, 0x87, 0xb5, 0xfe, 0x61, 0x79, 0x4a, + 0x99, 0xe9, 0xd1, 0x5f, 0x39, 0x15, 0xe6, 0xbd, 0x35, 0xa1, 0xaa, 0xf3, 0x30, 0x82, 0x6f, 0x7f, + 0x7f, 0x6d, 0xfc, 0x87, 0x11, 0xc4, 0x5b, 0x08, 0xfc, 0xe2, 0xe4, 0x1b, 0xb0, 0x10, 0xa8, 0x9d, + 0x44, 0x0c, 0xb6, 0xd1, 0xe3, 0xf7, 0x81, 0xe6, 0x14, 0xf6, 0x41, 0x66, 0x3d, 0x76, 0x59, 0x57, + 0xfe, 0x0d, 0x09, 0xae, 0x14, 0xa8, 0xbd, 0x06, 0x2f, 0xc7, 0x8f, 0x78, 0x3c, 0xe3, 0x01, 0x64, + 0x07, 0x6e, 0xdf, 0xc7, 0x26, 0xba, 0x7d, 0xbf, 0x10, 0x78, 0xda, 0x02, 0x5d, 0x83, 0x94, 0xf3, + 0x04, 0x86, 0xde, 0xe4, 0x4e, 0x05, 0x04, 0xa8, 0xd2, 0x94, 0x7f, 0x4f, 0x82, 0x2b, 0x07, 0x74, + 0x7c, 0x44, 0x08, 0x1b, 0xe6, 0xcd, 0xbe, 0x05, 0x41, 0xdd, 0x51, 0x4d, 0xb7, 0x98, 0xe2, 0x11, + 0xc3, 0x92, 0xc6, 0xaf, 0xbb, 0x9a, 0x75, 0x22, 0x46, 0x35, 0xf9, 0x2d, 0x6f, 0xc0, 0xa5, 0x6d, + 0x6c, 0x8f, 0xdf, 0x00, 0xf9, 0x29, 0x5c, 0x66, 0x77, 0xd3, 0x7d, 0x14, 0xd6, 0xa8, 0x0e, 0xba, + 0x0a, 0xd0, 0xd3, 0x5a, 0x58, 0xb5, 0x8d, 0x13, 0xfe, 0xc2, 0xd3, 0x9c, 0x32, 0x47, 0x20, 0x75, + 0x02, 0x40, 0x97, 0x81, 0x7e, 0xb8, 0x3b, 0x3d, 0x09, 0x25, 0x49, 0x00, 0x74, 0x9f, 0xe7, 0x57, + 0x25, 0xb8, 0x12, 0x5e, 0x27, 0xbf, 0x16, 0x5f, 0x83, 0xc5, 0xa0, 0x52, 0xc5, 0xda, 0x76, 0x5c, + 0xad, 0x66, 0x03, 0x5a, 0xb5, 0xd0, 0x4d, 0x58, 0xe8, 0xe2, 0x53, 0x5b, 0x1d, 0x10, 0x3b, 0x4d, + 0xc0, 0xfb, 0x42, 0x74, 0x79, 0x13, 0xae, 0x14, 0x71, 0x1b, 0x4f, 0x62, 0x05, 0x74, 0x13, 0x80, + 0xd9, 0xb9, 0xeb, 0xed, 0x46, 0x69, 0x70, 0x0b, 0x52, 0x4f, 0x8c, 0x43, 0x95, 0xfb, 0x2d, 0x6e, + 0x34, 0xaf, 0x8e, 0x74, 0xa2, 0x0a, 0x3c, 0x71, 0xe7, 0xd7, 0xab, 0x00, 0x9c, 0xde, 0x35, 0xe9, + 0x39, 0x0e, 0xa9, 0x34, 0xc9, 0x82, 0xf5, 0x22, 0xb3, 0xe8, 0x41, 0xb1, 0xc2, 0x8c, 0xf9, 0x65, + 0x88, 0xf4, 0x42, 0xd6, 0xbb, 0x06, 0x17, 0xb6, 0xb1, 0x3d, 0x96, 0xb0, 0xf2, 0x4f, 0x24, 0x58, + 0x62, 0x3a, 0x2f, 
0xb6, 0x7b, 0xf7, 0x8d, 0xc3, 0x51, 0xfa, 0x0e, 0x84, 0x0c, 0xb1, 0x17, 0x09, + 0x19, 0x2a, 0x90, 0x34, 0x75, 0xeb, 0x84, 0x32, 0x8a, 0x0f, 0x3f, 0xc3, 0x15, 0xf6, 0x24, 0x43, + 0x79, 0x4a, 0x99, 0x25, 0xf4, 0x84, 0xd5, 0x32, 0xcc, 0x10, 0x9d, 0xeb, 0x4d, 0xfe, 0x24, 0x4f, + 0xe2, 0x89, 0x71, 0x58, 0x69, 0x8a, 0xc9, 0xe7, 0x17, 0x25, 0x58, 0x21, 0x43, 0xc5, 0x55, 0xc9, + 0xb7, 0x39, 0x32, 0xd1, 0x25, 0x48, 0xd2, 0x2d, 0x6b, 0xf5, 0xf0, 0x8c, 0x8b, 0x33, 0x4b, 0xbf, + 0xb7, 0xce, 0x88, 0x24, 0x17, 0x07, 0x24, 0xe1, 0xe3, 0xb5, 0x08, 0xf3, 0x1e, 0xbb, 0x11, 0x43, + 0x75, 0x0c, 0xc3, 0x49, 0xb9, 0x86, 0x33, 0xfe, 0x00, 0xbd, 0x03, 0x17, 0xd9, 0x00, 0x1d, 0xcf, + 0x4e, 0xfe, 0x6a, 0x0c, 0xb2, 0xc1, 0xee, 0x24, 0xeb, 0x14, 0xfe, 0xfe, 0xbb, 0x7f, 0x76, 0x8c, + 0x5a, 0xa7, 0xf8, 0xe7, 0xc5, 0xb4, 0xe5, 0x7b, 0xc0, 0xe8, 0xa5, 0xbe, 0xf4, 0x14, 0xf9, 0x62, + 0x53, 0x3c, 0xfa, 0xdd, 0x2d, 0xcf, 0x5b, 0x1a, 0xd3, 0x13, 0xbd, 0xa5, 0xf1, 0x2f, 0x13, 0x30, + 0xc3, 0x46, 0x4f, 0xa8, 0x3f, 0x78, 0x9f, 0x3f, 0x65, 0x3d, 0xfc, 0x35, 0x77, 0xc6, 0xc0, 0xf3, + 0x76, 0xf5, 0x27, 0xf4, 0x1d, 0x4c, 0x1b, 0xf3, 0xcc, 0xe8, 0xcd, 0xa1, 0x74, 0xc4, 0x1c, 0x48, + 0xc0, 0x86, 0x15, 0x46, 0x84, 0xbe, 0x80, 0x79, 0x3a, 0xb6, 0xc4, 0x8a, 0x85, 0xc5, 0xf6, 0xef, + 0x9e, 0xe3, 0x78, 0x77, 0x79, 0x4a, 0x49, 0x99, 0x9e, 0x0b, 0xcd, 0x8f, 0x41, 0xcc, 0xb5, 0x0e, + 0xf3, 0xe1, 0xc7, 0xed, 0xa2, 0x5e, 0x12, 0x2a, 0x4f, 0x29, 0xa2, 0xc7, 0x05, 0xef, 0x17, 0x7a, + 0x61, 0xf8, 0x23, 0x00, 0xfa, 0x56, 0xe8, 0xb8, 0x6b, 0x80, 0x39, 0x8a, 0x4d, 0x49, 0xdf, 0x87, + 0x24, 0xee, 0x36, 0xc7, 0x8d, 0xff, 0x67, 0x71, 0xb7, 0x49, 0xc9, 0x6e, 0x41, 0xd6, 0x33, 0x62, + 0x99, 0x81, 0x01, 0xed, 0xf9, 0x8c, 0x3b, 0x24, 0xa9, 0x6d, 0xb9, 0xcb, 0x9a, 0xd4, 0xf8, 0xcb, + 0x1a, 0xb9, 0x01, 0x49, 0xd1, 0xaf, 0xe8, 0x12, 0x2c, 0xdf, 0xaf, 0x6e, 0xa9, 0x24, 0x16, 0x0f, + 0x49, 0xcf, 0xec, 0x97, 0xf6, 0x8a, 0x95, 0xbd, 0xed, 0xac, 0x44, 0x3e, 0x94, 0x83, 0xbd, 0x3d, + 0xf2, 0x11, 0x43, 0x49, 0x98, 0x2e, 0x56, 0xf7, 0x4a, 
0xd9, 0x38, 0x9a, 0x87, 0x24, 0x8b, 0xca, + 0x4b, 0xc5, 0xec, 0x34, 0x89, 0xd7, 0xef, 0xe5, 0x2b, 0xe4, 0x77, 0x82, 0x44, 0xfd, 0x22, 0x5f, + 0x74, 0x13, 0xb2, 0xdb, 0xd8, 0xf6, 0x4f, 0x04, 0x61, 0xce, 0xe0, 0xb7, 0x25, 0x40, 0xc4, 0x8b, + 0x31, 0xcc, 0x10, 0x5f, 0x3a, 0xed, 0xf3, 0xa5, 0xee, 0x83, 0x3e, 0x92, 0xf7, 0x41, 0x1f, 0xbf, + 0x13, 0x8d, 0x05, 0x9c, 0xa8, 0xdf, 0x01, 0xc7, 0x83, 0x0e, 0x58, 0x0c, 0xaa, 0xc4, 0x44, 0x83, + 0x4a, 0xee, 0xc1, 0x92, 0x4f, 0x70, 0xee, 0x7a, 0xdf, 0x81, 0xe9, 0x27, 0xc6, 0xa1, 0x70, 0xb9, + 0x57, 0x87, 0x72, 0x53, 0x28, 0xea, 0xd8, 0x7e, 0xf6, 0x4d, 0x58, 0x2a, 0x68, 0xdd, 0x06, 0x6e, + 0x8f, 0x56, 0xeb, 0x9b, 0xb0, 0xc4, 0x5c, 0xf2, 0x68, 0xd4, 0xdf, 0x92, 0xe0, 0x1a, 0x9f, 0xb6, + 0x07, 0x52, 0x3b, 0xa3, 0xa6, 0xb6, 0xc7, 0xb0, 0x14, 0xf2, 0xfc, 0xe4, 0x88, 0x93, 0x07, 0x21, + 0xd5, 0xa0, 0xc1, 0x47, 0x2a, 0x47, 0x2f, 0x0f, 0xfe, 0x9d, 0x04, 0xd7, 0x58, 0x30, 0x15, 0x2d, + 0x78, 0x98, 0x13, 0xfd, 0x36, 0x85, 0x7e, 0xa1, 0x60, 0x6b, 0x13, 0xae, 0x90, 0x31, 0x33, 0x49, + 0x63, 0x64, 0x1b, 0x5e, 0xa1, 0x56, 0x38, 0x40, 0xf4, 0xad, 0x2e, 0x18, 0xfe, 0xb6, 0x04, 0xd7, + 0x22, 0xab, 0xe5, 0x03, 0xe1, 0x4b, 0xb8, 0x10, 0xa2, 0x66, 0x31, 0x30, 0x26, 0xd0, 0xf3, 0xd2, + 0xa0, 0x9e, 0xc7, 0x1f, 0x33, 0xef, 0xc3, 0x35, 0x3e, 0x10, 0x26, 0x51, 0xeb, 0xda, 0x2e, 0xa4, + 0x7d, 0xff, 0xd6, 0x03, 0x5d, 0x84, 0xa5, 0x42, 0x75, 0xaf, 0x5e, 0xda, 0x0b, 0x1e, 0x13, 0xca, + 0xc2, 0xbc, 0x28, 0xa8, 0x97, 0xbe, 0xa8, 0x67, 0x25, 0xb4, 0x08, 0x69, 0x01, 0xa1, 0xff, 0xc6, + 0x21, 0x1b, 0x5b, 0xdb, 0x77, 0x5f, 0x58, 0xf3, 0x3c, 0x74, 0x46, 0x1c, 0x71, 0x69, 0xef, 0x60, + 0x37, 0xec, 0x3f, 0x46, 0xa4, 0x60, 0xb6, 0x42, 0x01, 0x9c, 0xa3, 0x52, 0xa9, 0x7d, 0xa6, 0xe6, + 0xf7, 0xf2, 0x3b, 0x8f, 0x6a, 0x95, 0x5a, 0x36, 0xb6, 0xf6, 0x8f, 0x25, 0x40, 0x83, 0x3b, 0x99, + 0xe8, 0x06, 0x5c, 0x53, 0x4a, 0x3b, 0x34, 0x09, 0x1f, 0xbd, 0x8f, 0x36, 0x0f, 0xc9, 0xd2, 0x83, + 0x83, 0xfc, 0x8e, 0x5a, 0xaf, 0x66, 0x25, 0xd2, 0x80, 0xbd, 0x6a, 0x5d, 0x75, 0x20, 0xf4, 
0x60, + 0xf0, 0xb6, 0x52, 0xca, 0xd7, 0x4b, 0x8a, 0x5a, 0x2f, 0xe7, 0xf7, 0xd8, 0x7f, 0xa4, 0xd8, 0x29, + 0xd5, 0x6a, 0xec, 0x73, 0x1a, 0xe5, 0x60, 0xc5, 0x8b, 0xa0, 0x56, 0x15, 0x46, 0x5e, 0xcb, 0x26, + 0x88, 0xa2, 0x1c, 0x54, 0x4f, 0xc1, 0x0c, 0x99, 0x28, 0x4a, 0x5f, 0x54, 0x6a, 0xf5, 0x5a, 0x76, + 0x76, 0x4d, 0x01, 0x70, 0xfd, 0x29, 0xba, 0x02, 0xab, 0xc5, 0x9d, 0x7d, 0x95, 0xcc, 0x49, 0x21, + 0x9a, 0x58, 0x80, 0x14, 0xd7, 0x04, 0xc1, 0xc8, 0x4a, 0x68, 0x19, 0x16, 0x7d, 0xda, 0xa0, 0xe0, + 0xd8, 0xe6, 0xff, 0x93, 0x29, 0xd3, 0x1a, 0x36, 0x9f, 0xe9, 0x0d, 0x8c, 0xfe, 0x86, 0x04, 0x19, + 0xff, 0xab, 0x99, 0xe8, 0xf6, 0xc8, 0xf0, 0xcf, 0xf3, 0x8c, 0x68, 0xee, 0xce, 0x98, 0xd8, 0xcc, + 0xdc, 0xe5, 0xcd, 0x3f, 0xfd, 0xe3, 0xff, 0xf2, 0x97, 0x62, 0xb7, 0xe5, 0x37, 0x36, 0x9e, 0x6d, + 0x6e, 0xfc, 0x2c, 0x1b, 0x63, 0xdf, 0xed, 0x99, 0xc6, 0x13, 0xdc, 0xb0, 0xad, 0x8d, 0xb5, 0x9f, + 0xdf, 0xe0, 0xef, 0xf3, 0xdf, 0xe5, 0x71, 0xca, 0x5d, 0x69, 0x0d, 0xfd, 0x40, 0x82, 0x94, 0xe7, + 0x9d, 0x65, 0xf4, 0xe6, 0xd8, 0x6f, 0x65, 0xe7, 0xd6, 0xc6, 0x41, 0xe5, 0xa2, 0x6d, 0x50, 0xd1, + 0xde, 0x94, 0x5f, 0x8b, 0x12, 0x8d, 0xbe, 0xe5, 0x7c, 0x97, 0xdd, 0xf6, 0x20, 0x72, 0xfd, 0xa6, + 0x04, 0x8b, 0x03, 0x6f, 0x03, 0xa3, 0x8d, 0x71, 0x12, 0xdf, 0x5e, 0x0d, 0xbe, 0x3d, 0x3e, 0x01, + 0x97, 0xf4, 0x7d, 0x2a, 0xe9, 0x86, 0xbc, 0x36, 0x4a, 0x89, 0xae, 0x4b, 0x10, 0xf2, 0x2a, 0x63, + 0xcb, 0xab, 0x4c, 0x2a, 0xaf, 0xf2, 0xe2, 0xf2, 0x9a, 0x3e, 0x79, 0x7f, 0x41, 0x82, 0xb4, 0xef, + 0xfd, 0x41, 0xf4, 0x56, 0xe4, 0xff, 0xde, 0x18, 0x7c, 0xfa, 0x30, 0x77, 0x7b, 0x3c, 0x64, 0x2e, + 0xe3, 0x32, 0x95, 0x71, 0x01, 0xa5, 0x89, 0x8c, 0xee, 0xe9, 0xa7, 0xff, 0x28, 0xc1, 0x72, 0x68, + 0x26, 0x10, 0xbd, 0x1b, 0x79, 0x70, 0x2c, 0x3a, 0x6f, 0x98, 0x1b, 0x33, 0x0d, 0x24, 0xb7, 0xa8, + 0x34, 0x9a, 0x7c, 0xc7, 0xab, 0x31, 0xc3, 0x6c, 0x69, 0x5d, 0xfd, 0x6b, 0xb6, 0xbb, 0x4b, 0x0d, + 0x32, 0x90, 0x2b, 0xba, 0x2b, 0xad, 0x3d, 0xbe, 0x23, 0xdf, 0x8a, 0xb4, 0xdf, 0x41, 0x74, 0xda, + 0xbe, 0xd0, 0xe4, 0x61, 0x64, 
0xfb, 0x86, 0xa5, 0x1a, 0x27, 0x6d, 0xdf, 0x26, 0x6b, 0x1f, 0x99, + 0x4b, 0x82, 0xad, 0x1b, 0x90, 0x76, 0x63, 0xed, 0xe7, 0x69, 0xfb, 0x36, 0x6f, 0xb9, 0x34, 0x6e, + 0xeb, 0x22, 0xd0, 0xd1, 0xef, 0x4b, 0x80, 0x06, 0x13, 0x8b, 0x28, 0xca, 0x86, 0x23, 0x73, 0x90, + 0x63, 0xb7, 0x4c, 0xa3, 0x2d, 0xfb, 0x19, 0x34, 0x59, 0xcb, 0x1e, 0xaf, 0xa1, 0xb1, 0x9b, 0x85, + 0x7e, 0x22, 0x89, 0xc7, 0x3e, 0x03, 0xa9, 0xc2, 0xcd, 0xa1, 0x16, 0x1f, 0x9a, 0x28, 0xcd, 0xbd, + 0x3b, 0x11, 0x0d, 0x1f, 0x2c, 0xfe, 0x46, 0x8e, 0x6b, 0x9e, 0x4e, 0x23, 0xc7, 0xb0, 0x4d, 0xf4, + 0xaf, 0x24, 0x58, 0x0e, 0xcd, 0x67, 0x46, 0x1a, 0xe6, 0xb0, 0xec, 0x67, 0x2e, 0xe2, 0x7d, 0x37, + 0xd1, 0x92, 0xb5, 0x49, 0xbb, 0x6b, 0x6d, 0xfc, 0xee, 0xfa, 0x43, 0x09, 0x56, 0xa3, 0x56, 0x0e, + 0xe8, 0x83, 0xa1, 0x5e, 0x24, 0x32, 0x1a, 0xcb, 0x8d, 0x1f, 0x18, 0xca, 0x1d, 0xda, 0xc4, 0x96, + 0xfc, 0xf6, 0xd0, 0xce, 0x0a, 0x89, 0x1e, 0xc9, 0x70, 0x7b, 0x5b, 0x7e, 0x2b, 0xaa, 0xcb, 0xc2, + 0x29, 0x68, 0x73, 0xa3, 0xd6, 0x1b, 0x91, 0xcd, 0x1d, 0xb1, 0x40, 0x39, 0x47, 0x73, 0x37, 0xdf, + 0x8e, 0xee, 0xd1, 0x10, 0xc9, 0xb9, 0x77, 0x79, 0x7b, 0xf3, 0xad, 0xd0, 0x7e, 0x8d, 0xa4, 0x40, + 0x7f, 0x20, 0xc1, 0x72, 0xe8, 0x72, 0x24, 0xd2, 0x4e, 0x87, 0x2d, 0x5e, 0x26, 0x69, 0x28, 0xf7, + 0xa1, 0x68, 0xe2, 0x86, 0x3e, 0xbe, 0x83, 0x26, 0x69, 0x25, 0xfa, 0x1f, 0x3c, 0x85, 0x1a, 0xb2, + 0x8c, 0x41, 0xef, 0x0f, 0x71, 0x1f, 0xd1, 0xab, 0xad, 0xdc, 0x07, 0x93, 0x92, 0x71, 0xc7, 0xe3, + 0x6f, 0xf3, 0x04, 0xb6, 0xec, 0xb4, 0x79, 0x3c, 0x43, 0x46, 0x3f, 0x96, 0x60, 0x35, 0x6a, 0x45, + 0x14, 0x69, 0xc5, 0x23, 0x96, 0x50, 0x91, 0x4e, 0x88, 0xb7, 0x6a, 0xed, 0x1c, 0x3d, 0xb9, 0x36, + 0x51, 0x4f, 0xfe, 0x40, 0x82, 0x6c, 0x70, 0xbf, 0x07, 0xad, 0x0f, 0x75, 0x41, 0x03, 0xc9, 0xea, + 0xdc, 0xe8, 0xfc, 0xb8, 0xbc, 0x4e, 0x1b, 0x74, 0x4b, 0xbe, 0x11, 0xa5, 0x72, 0x4f, 0xfe, 0x9c, + 0x47, 0xf8, 0xd9, 0xe0, 0x86, 0x4f, 0xa4, 0x5c, 0x11, 0x3b, 0x43, 0x13, 0xc8, 0xb5, 0x79, 0x23, + 0x54, 0x69, 0x1e, 0xa1, 0xf8, 0xe0, 0xfe, 0x0b, 0x12, 0xa4, 0x7d, 
0x1b, 0x3b, 0x91, 0x11, 0x68, + 0xd8, 0xf6, 0xcf, 0x38, 0x12, 0xbd, 0x45, 0x25, 0x7a, 0x1d, 0x8d, 0x23, 0x11, 0xfa, 0xeb, 0x12, + 0x2c, 0x04, 0xf6, 0x32, 0xd0, 0x9d, 0x21, 0x23, 0x69, 0x70, 0xf7, 0x25, 0xb7, 0x3e, 0x2e, 0x3a, + 0x1f, 0x70, 0x7e, 0xf9, 0x86, 0xf7, 0x24, 0xfa, 0x45, 0xfa, 0x66, 0x81, 0x7f, 0x8b, 0x23, 0xb2, + 0x1b, 0x23, 0xf6, 0x42, 0x22, 0x07, 0x09, 0x97, 0x64, 0x6d, 0x2c, 0x4d, 0xfd, 0x59, 0x09, 0xe6, + 0xbd, 0x9b, 0x6c, 0x68, 0x6d, 0xf8, 0x3c, 0xeb, 0x4d, 0xff, 0xe5, 0x86, 0x67, 0x23, 0xe5, 0x35, + 0x2a, 0xc8, 0x6b, 0xf2, 0xb5, 0x48, 0x7f, 0xc2, 0x72, 0x9d, 0xc4, 0x80, 0xbe, 0x2f, 0x41, 0xca, + 0x93, 0xfe, 0x8c, 0x5c, 0xba, 0x0e, 0xe6, 0x76, 0x23, 0x97, 0xae, 0x21, 0xd9, 0x54, 0xf9, 0x0d, + 0x2a, 0xd2, 0xab, 0x68, 0x94, 0x48, 0xe8, 0xe7, 0x60, 0xce, 0xc9, 0x37, 0xa3, 0x37, 0x86, 0x4c, + 0x50, 0x93, 0x28, 0xc4, 0x5f, 0xfb, 0x80, 0x2b, 0x62, 0x55, 0x93, 0x5e, 0xf9, 0x59, 0x98, 0xf7, + 0xa6, 0x5b, 0x23, 0x3b, 0x25, 0x24, 0x27, 0x1b, 0x69, 0x16, 0xbc, 0xf2, 0xb5, 0x91, 0x95, 0x53, + 0x93, 0xf0, 0xe4, 0x85, 0xa3, 0x4d, 0x62, 0x30, 0x79, 0x1c, 0x59, 0xfb, 0x3b, 0xb4, 0xf6, 0xb7, + 0xe4, 0x9b, 0x23, 0x6a, 0xbf, 0xdb, 0xa0, 0x4c, 0xef, 0x4a, 0x6b, 0x5b, 0xdf, 0x97, 0xe0, 0x52, + 0xc3, 0xe8, 0x84, 0x57, 0xbe, 0x95, 0x2c, 0xb6, 0x7b, 0xfb, 0xa4, 0x8e, 0x7d, 0xe9, 0xf1, 0x87, + 0x1c, 0xa5, 0x65, 0xb4, 0xb5, 0x6e, 0x6b, 0xdd, 0x30, 0x5b, 0x1b, 0x2d, 0xdc, 0xa5, 0x12, 0x6c, + 0xb0, 0x22, 0xad, 0xa7, 0x5b, 0x81, 0xff, 0x0f, 0xfd, 0x71, 0xb3, 0xdd, 0xfb, 0x8d, 0xd8, 0xd2, + 0x36, 0x23, 0x2d, 0xb4, 0x8d, 0x7e, 0x93, 0xf4, 0xd3, 0xfa, 0xc3, 0xcd, 0x7f, 0x23, 0xa0, 0x5f, + 0x52, 0xe8, 0x97, 0xc5, 0x76, 0xef, 0xcb, 0x87, 0x9b, 0x87, 0x33, 0x94, 0xe1, 0xbb, 0xff, 0x3f, + 0x00, 0x00, 0xff, 0xff, 0x1d, 0x42, 0x75, 0x5c, 0xc5, 0x7a, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/storage.pb.go b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/storage.pb.go new file mode 100644 index 000000000..7431e1720 --- /dev/null 
+++ b/vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/storage.pb.go @@ -0,0 +1,1614 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/privacy/dlp/v2/storage.proto + +package dlp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Categorization of results based on how likely they are to represent a match, +// based on the number of elements they contain which imply a match. +type Likelihood int32 + +const ( + // Default value; information with all likelihoods is included. + Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0 + // Few matching elements. + Likelihood_VERY_UNLIKELY Likelihood = 1 + Likelihood_UNLIKELY Likelihood = 2 + // Some matching elements. + Likelihood_POSSIBLE Likelihood = 3 + Likelihood_LIKELY Likelihood = 4 + // Many matching elements. + Likelihood_VERY_LIKELY Likelihood = 5 +) + +var Likelihood_name = map[int32]string{ + 0: "LIKELIHOOD_UNSPECIFIED", + 1: "VERY_UNLIKELY", + 2: "UNLIKELY", + 3: "POSSIBLE", + 4: "LIKELY", + 5: "VERY_LIKELY", +} +var Likelihood_value = map[string]int32{ + "LIKELIHOOD_UNSPECIFIED": 0, + "VERY_UNLIKELY": 1, + "UNLIKELY": 2, + "POSSIBLE": 3, + "LIKELY": 4, + "VERY_LIKELY": 5, +} + +func (x Likelihood) String() string { + return proto.EnumName(Likelihood_name, int32(x)) +} +func (Likelihood) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Type of information detected by the API. +type InfoType struct { + // Name of the information type. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *InfoType) Reset() { *m = InfoType{} } +func (m *InfoType) String() string { return proto.CompactTextString(m) } +func (*InfoType) ProtoMessage() {} +func (*InfoType) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *InfoType) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Custom information type provided by the user. Used to find domain-specific +// sensitive information configurable to the data in question. +type CustomInfoType struct { + // Info type configuration. All custom info types must have configurations + // that do not conflict with built-in info types or other custom info types. + InfoType *InfoType `protobuf:"bytes,1,opt,name=info_type,json=infoType" json:"info_type,omitempty"` + // Likelihood to return for this custom info type. This base value can be + // altered by a detection rule if the finding meets the criteria specified by + // the rule. Defaults to `VERY_LIKELY` if not specified. + Likelihood Likelihood `protobuf:"varint,6,opt,name=likelihood,enum=google.privacy.dlp.v2.Likelihood" json:"likelihood,omitempty"` + // Types that are valid to be assigned to Type: + // *CustomInfoType_Dictionary_ + // *CustomInfoType_Regex_ + // *CustomInfoType_SurrogateType_ + Type isCustomInfoType_Type `protobuf_oneof:"type"` + // Set of detection rules to apply to all findings of this custom info type. + // Rules are applied in order that they are specified. Not supported for the + // `surrogate_type` custom info type. 
+ DetectionRules []*CustomInfoType_DetectionRule `protobuf:"bytes,7,rep,name=detection_rules,json=detectionRules" json:"detection_rules,omitempty"` +} + +func (m *CustomInfoType) Reset() { *m = CustomInfoType{} } +func (m *CustomInfoType) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType) ProtoMessage() {} +func (*CustomInfoType) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isCustomInfoType_Type interface { + isCustomInfoType_Type() +} + +type CustomInfoType_Dictionary_ struct { + Dictionary *CustomInfoType_Dictionary `protobuf:"bytes,2,opt,name=dictionary,oneof"` +} +type CustomInfoType_Regex_ struct { + Regex *CustomInfoType_Regex `protobuf:"bytes,3,opt,name=regex,oneof"` +} +type CustomInfoType_SurrogateType_ struct { + SurrogateType *CustomInfoType_SurrogateType `protobuf:"bytes,4,opt,name=surrogate_type,json=surrogateType,oneof"` +} + +func (*CustomInfoType_Dictionary_) isCustomInfoType_Type() {} +func (*CustomInfoType_Regex_) isCustomInfoType_Type() {} +func (*CustomInfoType_SurrogateType_) isCustomInfoType_Type() {} + +func (m *CustomInfoType) GetType() isCustomInfoType_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *CustomInfoType) GetInfoType() *InfoType { + if m != nil { + return m.InfoType + } + return nil +} + +func (m *CustomInfoType) GetLikelihood() Likelihood { + if m != nil { + return m.Likelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *CustomInfoType) GetDictionary() *CustomInfoType_Dictionary { + if x, ok := m.GetType().(*CustomInfoType_Dictionary_); ok { + return x.Dictionary + } + return nil +} + +func (m *CustomInfoType) GetRegex() *CustomInfoType_Regex { + if x, ok := m.GetType().(*CustomInfoType_Regex_); ok { + return x.Regex + } + return nil +} + +func (m *CustomInfoType) GetSurrogateType() *CustomInfoType_SurrogateType { + if x, ok := m.GetType().(*CustomInfoType_SurrogateType_); ok { + return x.SurrogateType + } + return nil +} + +func (m 
*CustomInfoType) GetDetectionRules() []*CustomInfoType_DetectionRule { + if m != nil { + return m.DetectionRules + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CustomInfoType) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CustomInfoType_OneofMarshaler, _CustomInfoType_OneofUnmarshaler, _CustomInfoType_OneofSizer, []interface{}{ + (*CustomInfoType_Dictionary_)(nil), + (*CustomInfoType_Regex_)(nil), + (*CustomInfoType_SurrogateType_)(nil), + } +} + +func _CustomInfoType_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CustomInfoType) + // type + switch x := m.Type.(type) { + case *CustomInfoType_Dictionary_: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Dictionary); err != nil { + return err + } + case *CustomInfoType_Regex_: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Regex); err != nil { + return err + } + case *CustomInfoType_SurrogateType_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SurrogateType); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CustomInfoType.Type has unexpected type %T", x) + } + return nil +} + +func _CustomInfoType_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CustomInfoType) + switch tag { + case 2: // type.dictionary + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_Dictionary) + err := b.DecodeMessage(msg) + m.Type = &CustomInfoType_Dictionary_{msg} + return true, err + case 3: // type.regex + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_Regex) + err := b.DecodeMessage(msg) + m.Type = &CustomInfoType_Regex_{msg} + return true, err + case 4: // 
type.surrogate_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_SurrogateType) + err := b.DecodeMessage(msg) + m.Type = &CustomInfoType_SurrogateType_{msg} + return true, err + default: + return false, nil + } +} + +func _CustomInfoType_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CustomInfoType) + // type + switch x := m.Type.(type) { + case *CustomInfoType_Dictionary_: + s := proto.Size(x.Dictionary) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *CustomInfoType_Regex_: + s := proto.Size(x.Regex) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *CustomInfoType_SurrogateType_: + s := proto.Size(x.SurrogateType) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Custom information type based on a dictionary of words or phrases. This can +// be used to match sensitive information specific to the data, such as a list +// of employee IDs or job titles. +// +// Dictionary words are case-insensitive and all characters other than letters +// and digits in the unicode [Basic Multilingual +// Plane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane) +// will be replaced with whitespace when scanning for matches, so the +// dictionary phrase "Sam Johnson" will match all three phrases "sam johnson", +// "Sam, Johnson", and "Sam (Johnson)". Additionally, the characters +// surrounding any match must be of a different type than the adjacent +// characters within the word, so letters must be next to non-letters and +// digits next to non-digits. For example, the dictionary word "jen" will +// match the first three letters of the text "jen123" but will return no +// matches for "jennifer". 
+// +// Dictionary words containing a large number of characters that are not +// letters or digits may result in unexpected findings because such characters +// are treated as whitespace. +type CustomInfoType_Dictionary struct { + // Types that are valid to be assigned to Source: + // *CustomInfoType_Dictionary_WordList_ + Source isCustomInfoType_Dictionary_Source `protobuf_oneof:"source"` +} + +func (m *CustomInfoType_Dictionary) Reset() { *m = CustomInfoType_Dictionary{} } +func (m *CustomInfoType_Dictionary) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_Dictionary) ProtoMessage() {} +func (*CustomInfoType_Dictionary) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 0} } + +type isCustomInfoType_Dictionary_Source interface { + isCustomInfoType_Dictionary_Source() +} + +type CustomInfoType_Dictionary_WordList_ struct { + WordList *CustomInfoType_Dictionary_WordList `protobuf:"bytes,1,opt,name=word_list,json=wordList,oneof"` +} + +func (*CustomInfoType_Dictionary_WordList_) isCustomInfoType_Dictionary_Source() {} + +func (m *CustomInfoType_Dictionary) GetSource() isCustomInfoType_Dictionary_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *CustomInfoType_Dictionary) GetWordList() *CustomInfoType_Dictionary_WordList { + if x, ok := m.GetSource().(*CustomInfoType_Dictionary_WordList_); ok { + return x.WordList + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CustomInfoType_Dictionary) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CustomInfoType_Dictionary_OneofMarshaler, _CustomInfoType_Dictionary_OneofUnmarshaler, _CustomInfoType_Dictionary_OneofSizer, []interface{}{ + (*CustomInfoType_Dictionary_WordList_)(nil), + } +} + +func _CustomInfoType_Dictionary_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CustomInfoType_Dictionary) + // source + switch x := m.Source.(type) { + case *CustomInfoType_Dictionary_WordList_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.WordList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CustomInfoType_Dictionary.Source has unexpected type %T", x) + } + return nil +} + +func _CustomInfoType_Dictionary_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CustomInfoType_Dictionary) + switch tag { + case 1: // source.word_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_Dictionary_WordList) + err := b.DecodeMessage(msg) + m.Source = &CustomInfoType_Dictionary_WordList_{msg} + return true, err + default: + return false, nil + } +} + +func _CustomInfoType_Dictionary_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CustomInfoType_Dictionary) + // source + switch x := m.Source.(type) { + case *CustomInfoType_Dictionary_WordList_: + s := proto.Size(x.WordList) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message defining a list of words or phrases to search for in the data. +type CustomInfoType_Dictionary_WordList struct { + // Words or phrases defining the dictionary. 
The dictionary must contain + // at least one phrase and every phrase must contain at least 2 characters + // that are letters or digits. [required] + Words []string `protobuf:"bytes,1,rep,name=words" json:"words,omitempty"` +} + +func (m *CustomInfoType_Dictionary_WordList) Reset() { *m = CustomInfoType_Dictionary_WordList{} } +func (m *CustomInfoType_Dictionary_WordList) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_Dictionary_WordList) ProtoMessage() {} +func (*CustomInfoType_Dictionary_WordList) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 0, 0} +} + +func (m *CustomInfoType_Dictionary_WordList) GetWords() []string { + if m != nil { + return m.Words + } + return nil +} + +// Message defining a custom regular expression. +type CustomInfoType_Regex struct { + // Pattern defining the regular expression. + Pattern string `protobuf:"bytes,1,opt,name=pattern" json:"pattern,omitempty"` +} + +func (m *CustomInfoType_Regex) Reset() { *m = CustomInfoType_Regex{} } +func (m *CustomInfoType_Regex) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_Regex) ProtoMessage() {} +func (*CustomInfoType_Regex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 1} } + +func (m *CustomInfoType_Regex) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +// Message for detecting output from deidentification transformations +// such as +// [`CryptoReplaceFfxFpeConfig`](/dlp/docs/reference/rest/v2/content/deidentify#CryptoReplaceFfxFpeConfig). +// These types of transformations are +// those that perform pseudonymization, thereby producing a "surrogate" as +// output. This should be used in conjunction with a field on the +// transformation such as `surrogate_info_type`. This custom info type does +// not support the use of `detection_rules`. 
+type CustomInfoType_SurrogateType struct { +} + +func (m *CustomInfoType_SurrogateType) Reset() { *m = CustomInfoType_SurrogateType{} } +func (m *CustomInfoType_SurrogateType) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_SurrogateType) ProtoMessage() {} +func (*CustomInfoType_SurrogateType) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 2} } + +// Rule for modifying a custom info type to alter behavior under certain +// circumstances, depending on the specific details of the rule. Not supported +// for the `surrogate_type` custom info type. +type CustomInfoType_DetectionRule struct { + // Types that are valid to be assigned to Type: + // *CustomInfoType_DetectionRule_HotwordRule_ + Type isCustomInfoType_DetectionRule_Type `protobuf_oneof:"type"` +} + +func (m *CustomInfoType_DetectionRule) Reset() { *m = CustomInfoType_DetectionRule{} } +func (m *CustomInfoType_DetectionRule) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_DetectionRule) ProtoMessage() {} +func (*CustomInfoType_DetectionRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1, 3} } + +type isCustomInfoType_DetectionRule_Type interface { + isCustomInfoType_DetectionRule_Type() +} + +type CustomInfoType_DetectionRule_HotwordRule_ struct { + HotwordRule *CustomInfoType_DetectionRule_HotwordRule `protobuf:"bytes,1,opt,name=hotword_rule,json=hotwordRule,oneof"` +} + +func (*CustomInfoType_DetectionRule_HotwordRule_) isCustomInfoType_DetectionRule_Type() {} + +func (m *CustomInfoType_DetectionRule) GetType() isCustomInfoType_DetectionRule_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *CustomInfoType_DetectionRule) GetHotwordRule() *CustomInfoType_DetectionRule_HotwordRule { + if x, ok := m.GetType().(*CustomInfoType_DetectionRule_HotwordRule_); ok { + return x.HotwordRule + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*CustomInfoType_DetectionRule) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CustomInfoType_DetectionRule_OneofMarshaler, _CustomInfoType_DetectionRule_OneofUnmarshaler, _CustomInfoType_DetectionRule_OneofSizer, []interface{}{ + (*CustomInfoType_DetectionRule_HotwordRule_)(nil), + } +} + +func _CustomInfoType_DetectionRule_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CustomInfoType_DetectionRule) + // type + switch x := m.Type.(type) { + case *CustomInfoType_DetectionRule_HotwordRule_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HotwordRule); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("CustomInfoType_DetectionRule.Type has unexpected type %T", x) + } + return nil +} + +func _CustomInfoType_DetectionRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*CustomInfoType_DetectionRule) + switch tag { + case 1: // type.hotword_rule + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CustomInfoType_DetectionRule_HotwordRule) + err := b.DecodeMessage(msg) + m.Type = &CustomInfoType_DetectionRule_HotwordRule_{msg} + return true, err + default: + return false, nil + } +} + +func _CustomInfoType_DetectionRule_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CustomInfoType_DetectionRule) + // type + switch x := m.Type.(type) { + case *CustomInfoType_DetectionRule_HotwordRule_: + s := proto.Size(x.HotwordRule) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for specifying a window around a finding to apply a detection +// rule. 
+type CustomInfoType_DetectionRule_Proximity struct { + // Number of characters before the finding to consider. + WindowBefore int32 `protobuf:"varint,1,opt,name=window_before,json=windowBefore" json:"window_before,omitempty"` + // Number of characters after the finding to consider. + WindowAfter int32 `protobuf:"varint,2,opt,name=window_after,json=windowAfter" json:"window_after,omitempty"` +} + +func (m *CustomInfoType_DetectionRule_Proximity) Reset() { + *m = CustomInfoType_DetectionRule_Proximity{} +} +func (m *CustomInfoType_DetectionRule_Proximity) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_DetectionRule_Proximity) ProtoMessage() {} +func (*CustomInfoType_DetectionRule_Proximity) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 3, 0} +} + +func (m *CustomInfoType_DetectionRule_Proximity) GetWindowBefore() int32 { + if m != nil { + return m.WindowBefore + } + return 0 +} + +func (m *CustomInfoType_DetectionRule_Proximity) GetWindowAfter() int32 { + if m != nil { + return m.WindowAfter + } + return 0 +} + +// Message for specifying an adjustment to the likelihood of a finding as +// part of a detection rule. 
+type CustomInfoType_DetectionRule_LikelihoodAdjustment struct { + // Types that are valid to be assigned to Adjustment: + // *CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood + // *CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood + Adjustment isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment `protobuf_oneof:"adjustment"` +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) Reset() { + *m = CustomInfoType_DetectionRule_LikelihoodAdjustment{} +} +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) String() string { + return proto.CompactTextString(m) +} +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment) ProtoMessage() {} +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 3, 1} +} + +type isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment interface { + isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment() +} + +type CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood struct { + FixedLikelihood Likelihood `protobuf:"varint,1,opt,name=fixed_likelihood,json=fixedLikelihood,enum=google.privacy.dlp.v2.Likelihood,oneof"` +} +type CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood struct { + RelativeLikelihood int32 `protobuf:"varint,2,opt,name=relative_likelihood,json=relativeLikelihood,oneof"` +} + +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood) isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment() { +} +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood) isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment() { +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) GetAdjustment() isCustomInfoType_DetectionRule_LikelihoodAdjustment_Adjustment { + if m != nil { + return m.Adjustment + } + return nil +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) GetFixedLikelihood() 
Likelihood { + if x, ok := m.GetAdjustment().(*CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood); ok { + return x.FixedLikelihood + } + return Likelihood_LIKELIHOOD_UNSPECIFIED +} + +func (m *CustomInfoType_DetectionRule_LikelihoodAdjustment) GetRelativeLikelihood() int32 { + if x, ok := m.GetAdjustment().(*CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood); ok { + return x.RelativeLikelihood + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*CustomInfoType_DetectionRule_LikelihoodAdjustment) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofMarshaler, _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofUnmarshaler, _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofSizer, []interface{}{ + (*CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood)(nil), + (*CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood)(nil), + } +} + +func _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*CustomInfoType_DetectionRule_LikelihoodAdjustment) + // adjustment + switch x := m.Adjustment.(type) { + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.FixedLikelihood)) + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.RelativeLikelihood)) + case nil: + default: + return fmt.Errorf("CustomInfoType_DetectionRule_LikelihoodAdjustment.Adjustment has unexpected type %T", x) + } + return nil +} + +func _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, 
error) { + m := msg.(*CustomInfoType_DetectionRule_LikelihoodAdjustment) + switch tag { + case 1: // adjustment.fixed_likelihood + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Adjustment = &CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood{Likelihood(x)} + return true, err + case 2: // adjustment.relative_likelihood + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Adjustment = &CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood{int32(x)} + return true, err + default: + return false, nil + } +} + +func _CustomInfoType_DetectionRule_LikelihoodAdjustment_OneofSizer(msg proto.Message) (n int) { + m := msg.(*CustomInfoType_DetectionRule_LikelihoodAdjustment) + // adjustment + switch x := m.Adjustment.(type) { + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_FixedLikelihood: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.FixedLikelihood)) + case *CustomInfoType_DetectionRule_LikelihoodAdjustment_RelativeLikelihood: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.RelativeLikelihood)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Detection rule that adjusts the likelihood of findings within a certain +// proximity of hotwords. +type CustomInfoType_DetectionRule_HotwordRule struct { + // Regex pattern defining what qualifies as a hotword. + HotwordRegex *CustomInfoType_Regex `protobuf:"bytes,1,opt,name=hotword_regex,json=hotwordRegex" json:"hotword_regex,omitempty"` + // Proximity of the finding within which the entire hotword must reside. + // The total length of the window cannot exceed 1000 characters. Note that + // the finding itself will be included in the window, so that hotwords may + // be used to match substrings of the finding itself. 
For example, the + // certainty of a phone number regex "\(\d{3}\) \d{3}-\d{4}" could be + // adjusted upwards if the area code is known to be the local area code of + // a company office using the hotword regex "\(xxx\)", where "xxx" + // is the area code in question. + Proximity *CustomInfoType_DetectionRule_Proximity `protobuf:"bytes,2,opt,name=proximity" json:"proximity,omitempty"` + // Likelihood adjustment to apply to all matching findings. + LikelihoodAdjustment *CustomInfoType_DetectionRule_LikelihoodAdjustment `protobuf:"bytes,3,opt,name=likelihood_adjustment,json=likelihoodAdjustment" json:"likelihood_adjustment,omitempty"` +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) Reset() { + *m = CustomInfoType_DetectionRule_HotwordRule{} +} +func (m *CustomInfoType_DetectionRule_HotwordRule) String() string { return proto.CompactTextString(m) } +func (*CustomInfoType_DetectionRule_HotwordRule) ProtoMessage() {} +func (*CustomInfoType_DetectionRule_HotwordRule) Descriptor() ([]byte, []int) { + return fileDescriptor1, []int{1, 3, 2} +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) GetHotwordRegex() *CustomInfoType_Regex { + if m != nil { + return m.HotwordRegex + } + return nil +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) GetProximity() *CustomInfoType_DetectionRule_Proximity { + if m != nil { + return m.Proximity + } + return nil +} + +func (m *CustomInfoType_DetectionRule_HotwordRule) GetLikelihoodAdjustment() *CustomInfoType_DetectionRule_LikelihoodAdjustment { + if m != nil { + return m.LikelihoodAdjustment + } + return nil +} + +// General identifier of a data field in a storage service. +type FieldId struct { + // Name describing the field. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *FieldId) Reset() { *m = FieldId{} } +func (m *FieldId) String() string { return proto.CompactTextString(m) } +func (*FieldId) ProtoMessage() {} +func (*FieldId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *FieldId) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Datastore partition ID. +// A partition ID identifies a grouping of entities. The grouping is always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +type PartitionId struct { + // The ID of the project to which the entities belong. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // If not empty, the ID of the namespace to which the entities belong. + NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId" json:"namespace_id,omitempty"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} +func (*PartitionId) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *PartitionId) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *PartitionId) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +// A representation of a Datastore kind. +type KindExpression struct { + // The name of the kind. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *KindExpression) Reset() { *m = KindExpression{} } +func (m *KindExpression) String() string { return proto.CompactTextString(m) } +func (*KindExpression) ProtoMessage() {} +func (*KindExpression) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *KindExpression) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Options defining a data set within Google Cloud Datastore. +type DatastoreOptions struct { + // A partition ID identifies a grouping of entities. The grouping is always + // by project and namespace, however the namespace ID may be empty. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The kind to process. + Kind *KindExpression `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` +} + +func (m *DatastoreOptions) Reset() { *m = DatastoreOptions{} } +func (m *DatastoreOptions) String() string { return proto.CompactTextString(m) } +func (*DatastoreOptions) ProtoMessage() {} +func (*DatastoreOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *DatastoreOptions) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *DatastoreOptions) GetKind() *KindExpression { + if m != nil { + return m.Kind + } + return nil +} + +// Options defining a file or a set of files (path ending with *) within +// a Google Cloud Storage bucket. +type CloudStorageOptions struct { + FileSet *CloudStorageOptions_FileSet `protobuf:"bytes,1,opt,name=file_set,json=fileSet" json:"file_set,omitempty"` + // Max number of bytes to scan from a file. If a scanned file's size is bigger + // than this value then the rest of the bytes are omitted. 
+ BytesLimitPerFile int64 `protobuf:"varint,4,opt,name=bytes_limit_per_file,json=bytesLimitPerFile" json:"bytes_limit_per_file,omitempty"` +} + +func (m *CloudStorageOptions) Reset() { *m = CloudStorageOptions{} } +func (m *CloudStorageOptions) String() string { return proto.CompactTextString(m) } +func (*CloudStorageOptions) ProtoMessage() {} +func (*CloudStorageOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +func (m *CloudStorageOptions) GetFileSet() *CloudStorageOptions_FileSet { + if m != nil { + return m.FileSet + } + return nil +} + +func (m *CloudStorageOptions) GetBytesLimitPerFile() int64 { + if m != nil { + return m.BytesLimitPerFile + } + return 0 +} + +// Set of files to scan. +type CloudStorageOptions_FileSet struct { + // The url, in the format `gs:///`. Trailing wildcard in the + // path is allowed. + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` +} + +func (m *CloudStorageOptions_FileSet) Reset() { *m = CloudStorageOptions_FileSet{} } +func (m *CloudStorageOptions_FileSet) String() string { return proto.CompactTextString(m) } +func (*CloudStorageOptions_FileSet) ProtoMessage() {} +func (*CloudStorageOptions_FileSet) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6, 0} } + +func (m *CloudStorageOptions_FileSet) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +// Options defining BigQuery table and row identifiers. +type BigQueryOptions struct { + // Complete BigQuery table reference. + TableReference *BigQueryTable `protobuf:"bytes,1,opt,name=table_reference,json=tableReference" json:"table_reference,omitempty"` + // References to fields uniquely identifying rows within the table. + // Nested fields in the format, like `person.birthdate.year`, are allowed. 
+ IdentifyingFields []*FieldId `protobuf:"bytes,2,rep,name=identifying_fields,json=identifyingFields" json:"identifying_fields,omitempty"` +} + +func (m *BigQueryOptions) Reset() { *m = BigQueryOptions{} } +func (m *BigQueryOptions) String() string { return proto.CompactTextString(m) } +func (*BigQueryOptions) ProtoMessage() {} +func (*BigQueryOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *BigQueryOptions) GetTableReference() *BigQueryTable { + if m != nil { + return m.TableReference + } + return nil +} + +func (m *BigQueryOptions) GetIdentifyingFields() []*FieldId { + if m != nil { + return m.IdentifyingFields + } + return nil +} + +// Shared message indicating Cloud storage type. +type StorageConfig struct { + // Types that are valid to be assigned to Type: + // *StorageConfig_DatastoreOptions + // *StorageConfig_CloudStorageOptions + // *StorageConfig_BigQueryOptions + Type isStorageConfig_Type `protobuf_oneof:"type"` + TimespanConfig *StorageConfig_TimespanConfig `protobuf:"bytes,6,opt,name=timespan_config,json=timespanConfig" json:"timespan_config,omitempty"` +} + +func (m *StorageConfig) Reset() { *m = StorageConfig{} } +func (m *StorageConfig) String() string { return proto.CompactTextString(m) } +func (*StorageConfig) ProtoMessage() {} +func (*StorageConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +type isStorageConfig_Type interface { + isStorageConfig_Type() +} + +type StorageConfig_DatastoreOptions struct { + DatastoreOptions *DatastoreOptions `protobuf:"bytes,2,opt,name=datastore_options,json=datastoreOptions,oneof"` +} +type StorageConfig_CloudStorageOptions struct { + CloudStorageOptions *CloudStorageOptions `protobuf:"bytes,3,opt,name=cloud_storage_options,json=cloudStorageOptions,oneof"` +} +type StorageConfig_BigQueryOptions struct { + BigQueryOptions *BigQueryOptions `protobuf:"bytes,4,opt,name=big_query_options,json=bigQueryOptions,oneof"` +} + +func 
(*StorageConfig_DatastoreOptions) isStorageConfig_Type() {} +func (*StorageConfig_CloudStorageOptions) isStorageConfig_Type() {} +func (*StorageConfig_BigQueryOptions) isStorageConfig_Type() {} + +func (m *StorageConfig) GetType() isStorageConfig_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *StorageConfig) GetDatastoreOptions() *DatastoreOptions { + if x, ok := m.GetType().(*StorageConfig_DatastoreOptions); ok { + return x.DatastoreOptions + } + return nil +} + +func (m *StorageConfig) GetCloudStorageOptions() *CloudStorageOptions { + if x, ok := m.GetType().(*StorageConfig_CloudStorageOptions); ok { + return x.CloudStorageOptions + } + return nil +} + +func (m *StorageConfig) GetBigQueryOptions() *BigQueryOptions { + if x, ok := m.GetType().(*StorageConfig_BigQueryOptions); ok { + return x.BigQueryOptions + } + return nil +} + +func (m *StorageConfig) GetTimespanConfig() *StorageConfig_TimespanConfig { + if m != nil { + return m.TimespanConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StorageConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StorageConfig_OneofMarshaler, _StorageConfig_OneofUnmarshaler, _StorageConfig_OneofSizer, []interface{}{ + (*StorageConfig_DatastoreOptions)(nil), + (*StorageConfig_CloudStorageOptions)(nil), + (*StorageConfig_BigQueryOptions)(nil), + } +} + +func _StorageConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StorageConfig) + // type + switch x := m.Type.(type) { + case *StorageConfig_DatastoreOptions: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatastoreOptions); err != nil { + return err + } + case *StorageConfig_CloudStorageOptions: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CloudStorageOptions); err != nil { + return err + } + case *StorageConfig_BigQueryOptions: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BigQueryOptions); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StorageConfig.Type has unexpected type %T", x) + } + return nil +} + +func _StorageConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StorageConfig) + switch tag { + case 2: // type.datastore_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatastoreOptions) + err := b.DecodeMessage(msg) + m.Type = &StorageConfig_DatastoreOptions{msg} + return true, err + case 3: // type.cloud_storage_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(CloudStorageOptions) + err := b.DecodeMessage(msg) + m.Type = &StorageConfig_CloudStorageOptions{msg} + return true, err + case 4: // type.big_query_options + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BigQueryOptions) + 
err := b.DecodeMessage(msg) + m.Type = &StorageConfig_BigQueryOptions{msg} + return true, err + default: + return false, nil + } +} + +func _StorageConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StorageConfig) + // type + switch x := m.Type.(type) { + case *StorageConfig_DatastoreOptions: + s := proto.Size(x.DatastoreOptions) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StorageConfig_CloudStorageOptions: + s := proto.Size(x.CloudStorageOptions) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StorageConfig_BigQueryOptions: + s := proto.Size(x.BigQueryOptions) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Configuration of the timespan of the items to include in scanning. +// Currently only supported when inspecting Google Cloud Storage and BigQuery. +type StorageConfig_TimespanConfig struct { + // Exclude files older than this value. + StartTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // Exclude files newer than this value. + // If set to zero, no upper time limit is applied. + EndTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + // Specification of the field containing the timestamp of scanned items. + // Required for data sources like Datastore or BigQuery. + // The valid data types of the timestamp field are: + // for BigQuery - timestamp, date, datetime; + // for Datastore - timestamp. + // Datastore entity will be scanned if the timestamp property does not exist + // or its value is empty or invalid. 
+ TimestampField *FieldId `protobuf:"bytes,3,opt,name=timestamp_field,json=timestampField" json:"timestamp_field,omitempty"` + // When the job is started by a JobTrigger we will automatically figure out + // a valid start_time to avoid scanning files that have not been modified + // since the last time the JobTrigger executed. This will be based on the + // time of the execution of the last run of the JobTrigger. + EnableAutoPopulationOfTimespanConfig bool `protobuf:"varint,4,opt,name=enable_auto_population_of_timespan_config,json=enableAutoPopulationOfTimespanConfig" json:"enable_auto_population_of_timespan_config,omitempty"` +} + +func (m *StorageConfig_TimespanConfig) Reset() { *m = StorageConfig_TimespanConfig{} } +func (m *StorageConfig_TimespanConfig) String() string { return proto.CompactTextString(m) } +func (*StorageConfig_TimespanConfig) ProtoMessage() {} +func (*StorageConfig_TimespanConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8, 0} } + +func (m *StorageConfig_TimespanConfig) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *StorageConfig_TimespanConfig) GetEndTime() *google_protobuf1.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *StorageConfig_TimespanConfig) GetTimestampField() *FieldId { + if m != nil { + return m.TimestampField + } + return nil +} + +func (m *StorageConfig_TimespanConfig) GetEnableAutoPopulationOfTimespanConfig() bool { + if m != nil { + return m.EnableAutoPopulationOfTimespanConfig + } + return false +} + +// Row key for identifying a record in BigQuery table. +type BigQueryKey struct { + // Complete BigQuery table reference. + TableReference *BigQueryTable `protobuf:"bytes,1,opt,name=table_reference,json=tableReference" json:"table_reference,omitempty"` + // Absolute number of the row from the beginning of the table at the time + // of scanning. 
+ RowNumber int64 `protobuf:"varint,2,opt,name=row_number,json=rowNumber" json:"row_number,omitempty"` +} + +func (m *BigQueryKey) Reset() { *m = BigQueryKey{} } +func (m *BigQueryKey) String() string { return proto.CompactTextString(m) } +func (*BigQueryKey) ProtoMessage() {} +func (*BigQueryKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *BigQueryKey) GetTableReference() *BigQueryTable { + if m != nil { + return m.TableReference + } + return nil +} + +func (m *BigQueryKey) GetRowNumber() int64 { + if m != nil { + return m.RowNumber + } + return 0 +} + +// Record key for a finding in Cloud Datastore. +type DatastoreKey struct { + // Datastore entity key. + EntityKey *Key `protobuf:"bytes,1,opt,name=entity_key,json=entityKey" json:"entity_key,omitempty"` +} + +func (m *DatastoreKey) Reset() { *m = DatastoreKey{} } +func (m *DatastoreKey) String() string { return proto.CompactTextString(m) } +func (*DatastoreKey) ProtoMessage() {} +func (*DatastoreKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *DatastoreKey) GetEntityKey() *Key { + if m != nil { + return m.EntityKey + } + return nil +} + +// A unique identifier for a Datastore entity. +// If a key's partition ID or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a project + // ID and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. 
The first + // element identifies a _root entity_, the second element identifies + // a _child_ of the root entity, the third element identifies a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's _ancestors_. + // + // A path can never be empty, and a path can have at most 100 elements. + Path []*Key_PathElement `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *Key) GetPartitionId() *PartitionId { + if m != nil { + return m.PartitionId + } + return nil +} + +func (m *Key) GetPath() []*Key_PathElement { + if m != nil { + return m.Path + } + return nil +} + +// A (kind, ID/name) pair used to construct a key path. +// +// If either name or ID is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex `__.*__` is reserved/read-only. + // A kind must not contain more than 1500 bytes when UTF-8 encoded. + // Cannot be `""`. + Kind string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` + // The type of ID. 
+ // + // Types that are valid to be assigned to IdType: + // *Key_PathElement_Id + // *Key_PathElement_Name + IdType isKey_PathElement_IdType `protobuf_oneof:"id_type"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} +func (*Key_PathElement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 0} } + +type isKey_PathElement_IdType interface { + isKey_PathElement_IdType() +} + +type Key_PathElement_Id struct { + Id int64 `protobuf:"varint,2,opt,name=id,oneof"` +} +type Key_PathElement_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,oneof"` +} + +func (*Key_PathElement_Id) isKey_PathElement_IdType() {} +func (*Key_PathElement_Name) isKey_PathElement_IdType() {} + +func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType { + if m != nil { + return m.IdType + } + return nil +} + +func (m *Key_PathElement) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *Key_PathElement) GetId() int64 { + if x, ok := m.GetIdType().(*Key_PathElement_Id); ok { + return x.Id + } + return 0 +} + +func (m *Key_PathElement) GetName() string { + if x, ok := m.GetIdType().(*Key_PathElement_Name); ok { + return x.Name + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{ + (*Key_PathElement_Id)(nil), + (*Key_PathElement_Name)(nil), + } +} + +func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case nil: + default: + return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x) + } + return nil +} + +func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key_PathElement) + switch tag { + case 2: // id_type.id + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.IdType = &Key_PathElement_Id{int64(x)} + return true, err + case 3: // id_type.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdType = &Key_PathElement_Name{x} + return true, err + default: + return false, nil + } +} + +func _Key_PathElement_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message for a unique key 
indicating a record that contains a finding. +type RecordKey struct { + // Types that are valid to be assigned to Type: + // *RecordKey_DatastoreKey + // *RecordKey_BigQueryKey + Type isRecordKey_Type `protobuf_oneof:"type"` +} + +func (m *RecordKey) Reset() { *m = RecordKey{} } +func (m *RecordKey) String() string { return proto.CompactTextString(m) } +func (*RecordKey) ProtoMessage() {} +func (*RecordKey) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +type isRecordKey_Type interface { + isRecordKey_Type() +} + +type RecordKey_DatastoreKey struct { + DatastoreKey *DatastoreKey `protobuf:"bytes,2,opt,name=datastore_key,json=datastoreKey,oneof"` +} +type RecordKey_BigQueryKey struct { + BigQueryKey *BigQueryKey `protobuf:"bytes,3,opt,name=big_query_key,json=bigQueryKey,oneof"` +} + +func (*RecordKey_DatastoreKey) isRecordKey_Type() {} +func (*RecordKey_BigQueryKey) isRecordKey_Type() {} + +func (m *RecordKey) GetType() isRecordKey_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *RecordKey) GetDatastoreKey() *DatastoreKey { + if x, ok := m.GetType().(*RecordKey_DatastoreKey); ok { + return x.DatastoreKey + } + return nil +} + +func (m *RecordKey) GetBigQueryKey() *BigQueryKey { + if x, ok := m.GetType().(*RecordKey_BigQueryKey); ok { + return x.BigQueryKey + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*RecordKey) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RecordKey_OneofMarshaler, _RecordKey_OneofUnmarshaler, _RecordKey_OneofSizer, []interface{}{ + (*RecordKey_DatastoreKey)(nil), + (*RecordKey_BigQueryKey)(nil), + } +} + +func _RecordKey_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RecordKey) + // type + switch x := m.Type.(type) { + case *RecordKey_DatastoreKey: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DatastoreKey); err != nil { + return err + } + case *RecordKey_BigQueryKey: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BigQueryKey); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RecordKey.Type has unexpected type %T", x) + } + return nil +} + +func _RecordKey_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RecordKey) + switch tag { + case 2: // type.datastore_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DatastoreKey) + err := b.DecodeMessage(msg) + m.Type = &RecordKey_DatastoreKey{msg} + return true, err + case 3: // type.big_query_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BigQueryKey) + err := b.DecodeMessage(msg) + m.Type = &RecordKey_BigQueryKey{msg} + return true, err + default: + return false, nil + } +} + +func _RecordKey_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RecordKey) + // type + switch x := m.Type.(type) { + case *RecordKey_DatastoreKey: + s := proto.Size(x.DatastoreKey) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *RecordKey_BigQueryKey: + s := proto.Size(x.BigQueryKey) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + 
case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Message defining the location of a BigQuery table. A table is uniquely +// identified by its project_id, dataset_id, and table_name. Within a query +// a table is often referenced with a string in the format of: +// `:.` or +// `..`. +type BigQueryTable struct { + // The Google Cloud Platform project ID of the project containing the table. + // If omitted, project ID is inferred from the API call. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Dataset ID of the table. + DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId" json:"dataset_id,omitempty"` + // Name of the table. + TableId string `protobuf:"bytes,3,opt,name=table_id,json=tableId" json:"table_id,omitempty"` +} + +func (m *BigQueryTable) Reset() { *m = BigQueryTable{} } +func (m *BigQueryTable) String() string { return proto.CompactTextString(m) } +func (*BigQueryTable) ProtoMessage() {} +func (*BigQueryTable) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *BigQueryTable) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *BigQueryTable) GetDatasetId() string { + if m != nil { + return m.DatasetId + } + return "" +} + +func (m *BigQueryTable) GetTableId() string { + if m != nil { + return m.TableId + } + return "" +} + +func init() { + proto.RegisterType((*InfoType)(nil), "google.privacy.dlp.v2.InfoType") + proto.RegisterType((*CustomInfoType)(nil), "google.privacy.dlp.v2.CustomInfoType") + proto.RegisterType((*CustomInfoType_Dictionary)(nil), "google.privacy.dlp.v2.CustomInfoType.Dictionary") + proto.RegisterType((*CustomInfoType_Dictionary_WordList)(nil), "google.privacy.dlp.v2.CustomInfoType.Dictionary.WordList") + proto.RegisterType((*CustomInfoType_Regex)(nil), "google.privacy.dlp.v2.CustomInfoType.Regex") + 
proto.RegisterType((*CustomInfoType_SurrogateType)(nil), "google.privacy.dlp.v2.CustomInfoType.SurrogateType") + proto.RegisterType((*CustomInfoType_DetectionRule)(nil), "google.privacy.dlp.v2.CustomInfoType.DetectionRule") + proto.RegisterType((*CustomInfoType_DetectionRule_Proximity)(nil), "google.privacy.dlp.v2.CustomInfoType.DetectionRule.Proximity") + proto.RegisterType((*CustomInfoType_DetectionRule_LikelihoodAdjustment)(nil), "google.privacy.dlp.v2.CustomInfoType.DetectionRule.LikelihoodAdjustment") + proto.RegisterType((*CustomInfoType_DetectionRule_HotwordRule)(nil), "google.privacy.dlp.v2.CustomInfoType.DetectionRule.HotwordRule") + proto.RegisterType((*FieldId)(nil), "google.privacy.dlp.v2.FieldId") + proto.RegisterType((*PartitionId)(nil), "google.privacy.dlp.v2.PartitionId") + proto.RegisterType((*KindExpression)(nil), "google.privacy.dlp.v2.KindExpression") + proto.RegisterType((*DatastoreOptions)(nil), "google.privacy.dlp.v2.DatastoreOptions") + proto.RegisterType((*CloudStorageOptions)(nil), "google.privacy.dlp.v2.CloudStorageOptions") + proto.RegisterType((*CloudStorageOptions_FileSet)(nil), "google.privacy.dlp.v2.CloudStorageOptions.FileSet") + proto.RegisterType((*BigQueryOptions)(nil), "google.privacy.dlp.v2.BigQueryOptions") + proto.RegisterType((*StorageConfig)(nil), "google.privacy.dlp.v2.StorageConfig") + proto.RegisterType((*StorageConfig_TimespanConfig)(nil), "google.privacy.dlp.v2.StorageConfig.TimespanConfig") + proto.RegisterType((*BigQueryKey)(nil), "google.privacy.dlp.v2.BigQueryKey") + proto.RegisterType((*DatastoreKey)(nil), "google.privacy.dlp.v2.DatastoreKey") + proto.RegisterType((*Key)(nil), "google.privacy.dlp.v2.Key") + proto.RegisterType((*Key_PathElement)(nil), "google.privacy.dlp.v2.Key.PathElement") + proto.RegisterType((*RecordKey)(nil), "google.privacy.dlp.v2.RecordKey") + proto.RegisterType((*BigQueryTable)(nil), "google.privacy.dlp.v2.BigQueryTable") + proto.RegisterEnum("google.privacy.dlp.v2.Likelihood", 
Likelihood_name, Likelihood_value) +} + +func init() { proto.RegisterFile("google/privacy/dlp/v2/storage.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1517 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xef, 0x6e, 0x1b, 0x37, + 0x12, 0xd7, 0x4a, 0x96, 0x2d, 0x8d, 0xac, 0x3f, 0xa6, 0x9d, 0x83, 0xa2, 0x3b, 0x27, 0x8e, 0x92, + 0xcb, 0xe5, 0x72, 0x80, 0x74, 0x75, 0x50, 0xa0, 0xee, 0x1f, 0x14, 0x96, 0xa5, 0x44, 0xaa, 0x1d, + 0x5b, 0xa5, 0x1d, 0xa7, 0x69, 0x0d, 0x6c, 0x57, 0x22, 0x25, 0x33, 0x59, 0x2d, 0xb7, 0xbb, 0x94, + 0x6d, 0xa1, 0xe8, 0x33, 0x14, 0x28, 0xfa, 0x08, 0x05, 0x8a, 0x16, 0x05, 0x0a, 0xf4, 0x63, 0xfb, + 0x06, 0x7d, 0x88, 0x3e, 0x45, 0x1f, 0xa0, 0x20, 0xb9, 0xbb, 0x92, 0x1c, 0xab, 0x76, 0x82, 0x7c, + 0xd2, 0xce, 0x70, 0x7e, 0xc3, 0xe1, 0x6f, 0x86, 0x33, 0x14, 0xdc, 0xee, 0x73, 0xde, 0xb7, 0x69, + 0xd5, 0xf5, 0xd8, 0x89, 0xd5, 0x1d, 0x55, 0x89, 0xed, 0x56, 0x4f, 0xd6, 0xab, 0xbe, 0xe0, 0x9e, + 0xd5, 0xa7, 0x15, 0xd7, 0xe3, 0x82, 0xa3, 0x6b, 0xda, 0xa8, 0x12, 0x18, 0x55, 0x88, 0xed, 0x56, + 0x4e, 0xd6, 0x4b, 0xff, 0x0a, 0xb0, 0x96, 0xcb, 0xaa, 0x96, 0xe3, 0x70, 0x61, 0x09, 0xc6, 0x1d, + 0x5f, 0x83, 0x4a, 0x37, 0x23, 0xcf, 0x5c, 0xf0, 0xce, 0xb0, 0x57, 0x15, 0x6c, 0x40, 0x7d, 0x61, + 0x0d, 0x5c, 0x6d, 0x50, 0xbe, 0x01, 0xa9, 0x96, 0xd3, 0xe3, 0x07, 0x23, 0x97, 0x22, 0x04, 0x73, + 0x8e, 0x35, 0xa0, 0x45, 0x63, 0xcd, 0xb8, 0x97, 0xc6, 0xea, 0xbb, 0xfc, 0x27, 0x40, 0x6e, 0x6b, + 0xe8, 0x0b, 0x3e, 0x88, 0xcc, 0xde, 0x87, 0x34, 0x73, 0x7a, 0xdc, 0x14, 0x23, 0x57, 0xdb, 0x66, + 0xd6, 0x6f, 0x56, 0x2e, 0x0c, 0xae, 0x12, 0x62, 0x70, 0x8a, 0x85, 0xe8, 0x4d, 0x00, 0x9b, 0xbd, + 0xa0, 0x36, 0x3b, 0xe6, 0x9c, 0x14, 0xe7, 0xd7, 0x8c, 0x7b, 0xb9, 0xf5, 0x5b, 0x33, 0xe0, 0x3b, + 0x91, 0x21, 0x9e, 0x00, 0x21, 0x0c, 0x40, 0x58, 0x57, 0x1e, 0xd3, 0xf2, 0x46, 0xc5, 0xb8, 0x8a, + 0xe0, 0xff, 0x33, 0x5c, 0x4c, 0xc7, 0x5e, 0xa9, 0x47, 0xb8, 0x66, 0x0c, 0x4f, 0x78, 0x41, 0x5b, + 0x90, 0xf4, 0x68, 
0x9f, 0x9e, 0x15, 0x13, 0xca, 0xdd, 0xff, 0xae, 0xe6, 0x0e, 0x4b, 0x48, 0x33, + 0x86, 0x35, 0x16, 0x1d, 0x41, 0xce, 0x1f, 0x7a, 0x1e, 0xef, 0x5b, 0x82, 0x6a, 0x7a, 0xe6, 0x94, + 0xb7, 0x07, 0x57, 0xf3, 0xb6, 0x1f, 0x62, 0xa5, 0xd4, 0x8c, 0xe1, 0xac, 0x3f, 0xa9, 0x40, 0x47, + 0x90, 0x27, 0x54, 0x50, 0x15, 0xb2, 0xe9, 0x0d, 0x6d, 0xea, 0x17, 0x17, 0xd6, 0x12, 0x57, 0x77, + 0x5f, 0x0f, 0xc1, 0x78, 0x68, 0x53, 0x9c, 0x23, 0x93, 0xa2, 0x5f, 0xfa, 0xc6, 0x00, 0x18, 0xb3, + 0x83, 0x3e, 0x81, 0xf4, 0x29, 0xf7, 0x88, 0x69, 0x33, 0x5f, 0x04, 0x49, 0xde, 0x78, 0x55, 0x8a, + 0x2b, 0x4f, 0xb9, 0x47, 0x76, 0x98, 0x2f, 0x9a, 0x31, 0x9c, 0x3a, 0x0d, 0xbe, 0x4b, 0x6b, 0x90, + 0x0a, 0xf5, 0x68, 0x05, 0x92, 0x52, 0xef, 0x17, 0x8d, 0xb5, 0xc4, 0xbd, 0x34, 0xd6, 0x42, 0x2d, + 0x05, 0xf3, 0x3e, 0x1f, 0x7a, 0x5d, 0x5a, 0xba, 0x05, 0x49, 0x45, 0x31, 0x2a, 0xc2, 0x82, 0x6b, + 0x09, 0x41, 0x3d, 0x27, 0xa8, 0xce, 0x50, 0x2c, 0xe5, 0x21, 0x3b, 0xc5, 0x5b, 0xe9, 0xd7, 0x24, + 0x64, 0xa7, 0x8e, 0x8a, 0x08, 0x2c, 0x1e, 0x73, 0xa1, 0x8e, 0x23, 0x69, 0x0b, 0x8e, 0xf3, 0xe1, + 0x6b, 0xb0, 0x56, 0x69, 0x6a, 0x3f, 0xf2, 0xbb, 0x19, 0xc3, 0x99, 0xe3, 0xb1, 0x58, 0xda, 0x87, + 0x74, 0xdb, 0xe3, 0x67, 0x6c, 0xc0, 0xc4, 0x08, 0xdd, 0x86, 0xec, 0x29, 0x73, 0x08, 0x3f, 0x35, + 0x3b, 0xb4, 0xc7, 0x3d, 0xbd, 0x67, 0x12, 0x2f, 0x6a, 0x65, 0x4d, 0xe9, 0xd0, 0x2d, 0x08, 0x64, + 0xd3, 0xea, 0x09, 0xea, 0xa9, 0x4a, 0x4e, 0xe2, 0x8c, 0xd6, 0x6d, 0x4a, 0x55, 0xe9, 0x7b, 0x03, + 0x56, 0xc6, 0xb7, 0x60, 0x93, 0x3c, 0x1f, 0xfa, 0x62, 0x40, 0x1d, 0x81, 0x76, 0xa1, 0xd0, 0x63, + 0x67, 0x54, 0x26, 0x28, 0xba, 0x4c, 0xc6, 0x15, 0x2f, 0x53, 0x33, 0x86, 0xf3, 0x0a, 0x3c, 0x56, + 0xa1, 0xb7, 0x60, 0xd9, 0xa3, 0xb6, 0x25, 0xd8, 0x09, 0x9d, 0x74, 0xa9, 0x42, 0x6a, 0xc6, 0x30, + 0x0a, 0x17, 0xc7, 0x90, 0xda, 0x22, 0x80, 0x15, 0x05, 0x54, 0xfa, 0x2d, 0x0e, 0x99, 0x09, 0x76, + 0x50, 0x1b, 0xb2, 0x11, 0xe9, 0xea, 0x62, 0x19, 0xaf, 0x7c, 0xb1, 0x70, 0x98, 0x36, 0x5d, 0x03, + 0x9f, 0x41, 0xda, 0x0d, 0x09, 0x0e, 0x6e, 0xfd, 0x07, 
0xaf, 0x93, 0xc3, 0x28, 0x4b, 0x78, 0xec, + 0x0f, 0x7d, 0x05, 0xd7, 0xc6, 0xc7, 0x36, 0xc7, 0xe7, 0x0a, 0xfa, 0x41, 0xf3, 0x75, 0x36, 0xba, + 0x28, 0x71, 0x78, 0xc5, 0xbe, 0x40, 0x5b, 0x9b, 0x87, 0x39, 0xd9, 0x2f, 0xc2, 0xdf, 0xf2, 0x2a, + 0x2c, 0x3c, 0x64, 0xd4, 0x26, 0x2d, 0x72, 0x61, 0x57, 0xde, 0x83, 0x4c, 0xdb, 0xf2, 0x04, 0x93, + 0x5b, 0xb5, 0x08, 0x5a, 0x05, 0x70, 0x3d, 0xfe, 0x9c, 0x76, 0x85, 0xc9, 0x74, 0xce, 0xd2, 0xea, + 0x6c, 0x52, 0xd3, 0x22, 0xb2, 0xce, 0x24, 0xca, 0x77, 0xad, 0x2e, 0x95, 0x06, 0x73, 0xca, 0x20, + 0x13, 0xe9, 0x5a, 0xa4, 0x7c, 0x07, 0x72, 0xdb, 0xcc, 0x21, 0x8d, 0x33, 0xd7, 0xa3, 0xbe, 0xcf, + 0xb8, 0x73, 0xe1, 0xb6, 0xdf, 0x1a, 0x50, 0xa8, 0x5b, 0xc2, 0x92, 0x83, 0x89, 0xee, 0xb9, 0x6a, + 0xd0, 0xa0, 0x06, 0x2c, 0xba, 0x61, 0x2c, 0xd2, 0xbb, 0xce, 0x73, 0x79, 0x06, 0x61, 0x13, 0x61, + 0xe3, 0x8c, 0x3b, 0x71, 0x86, 0x0d, 0x98, 0x7b, 0xc1, 0x1c, 0x12, 0x24, 0xf6, 0xdf, 0x33, 0xe0, + 0xd3, 0x41, 0x62, 0x05, 0x29, 0xff, 0x62, 0xc0, 0xf2, 0x96, 0xcd, 0x87, 0x64, 0x5f, 0x0f, 0xcc, + 0x30, 0xb2, 0xc7, 0x90, 0xea, 0x31, 0x9b, 0x9a, 0x3e, 0x0d, 0x5b, 0xd8, 0xfa, 0xac, 0x34, 0xbe, + 0x8c, 0xae, 0x3c, 0x64, 0x36, 0xdd, 0xa7, 0x02, 0x2f, 0xf4, 0xf4, 0x07, 0xaa, 0xc2, 0x4a, 0x67, + 0x24, 0xa8, 0x6f, 0xda, 0xb2, 0x64, 0x4c, 0x97, 0x7a, 0xa6, 0x5c, 0x52, 0x74, 0x26, 0xf0, 0x92, + 0x5a, 0xdb, 0x91, 0x4b, 0x6d, 0xea, 0x49, 0x70, 0xe9, 0x9f, 0x32, 0x89, 0x1a, 0x5b, 0x80, 0xc4, + 0xd0, 0xb3, 0x03, 0x32, 0xe5, 0x67, 0xf9, 0x67, 0x03, 0xf2, 0x35, 0xd6, 0xff, 0x78, 0x48, 0xbd, + 0xd1, 0x38, 0xe0, 0xbc, 0xb0, 0x3a, 0x36, 0x35, 0x3d, 0xda, 0xa3, 0x1e, 0x75, 0xba, 0x61, 0xaf, + 0xba, 0x33, 0x23, 0xee, 0xd0, 0xc1, 0x81, 0x44, 0xe1, 0x9c, 0x02, 0xe3, 0x10, 0x8b, 0x1e, 0x03, + 0x62, 0x84, 0x3a, 0x82, 0xf5, 0x46, 0xcc, 0xe9, 0x9b, 0x3d, 0x59, 0x50, 0x7e, 0x31, 0xae, 0x66, + 0xc6, 0x8d, 0x19, 0x1e, 0x83, 0xaa, 0xc3, 0x4b, 0x13, 0x48, 0xa5, 0xf3, 0xcb, 0x3f, 0x25, 0x21, + 0x1b, 0x70, 0xb4, 0xc5, 0x9d, 0x1e, 0xeb, 0xa3, 0x43, 0x58, 0x22, 0x61, 0x39, 0x98, 0x5c, 
0x1f, + 0x22, 0x48, 0xe0, 0x7f, 0x66, 0xf8, 0x3f, 0x5f, 0x3e, 0xcd, 0x18, 0x2e, 0x90, 0xf3, 0x25, 0xf5, + 0x39, 0x5c, 0xeb, 0xca, 0x8c, 0x98, 0xc1, 0x0b, 0x28, 0xf2, 0xad, 0x2f, 0xe3, 0xfd, 0xab, 0x67, + 0xb1, 0x19, 0xc3, 0xcb, 0xdd, 0x0b, 0x4a, 0xe3, 0x00, 0x96, 0x3a, 0xac, 0x6f, 0x7e, 0x21, 0xc9, + 0x8b, 0xbc, 0xeb, 0x61, 0x7d, 0xf7, 0x12, 0xae, 0xc7, 0x9e, 0xf3, 0x9d, 0x73, 0xf9, 0x3b, 0x82, + 0xbc, 0x7a, 0x5f, 0xb9, 0x96, 0x63, 0x76, 0x15, 0x45, 0xea, 0x81, 0x33, 0x7b, 0x42, 0x4f, 0xd1, + 0x59, 0x39, 0x08, 0xb0, 0x5a, 0xc4, 0x39, 0x31, 0x25, 0x97, 0xbe, 0x8b, 0x43, 0x6e, 0xda, 0x04, + 0x6d, 0x00, 0xf8, 0xc2, 0xf2, 0x84, 0x29, 0x4d, 0x83, 0x5a, 0x29, 0x8d, 0xf7, 0xd2, 0x6f, 0x3e, + 0xed, 0x57, 0xbe, 0xf9, 0x70, 0x5a, 0x59, 0x4b, 0x19, 0xbd, 0x0d, 0x29, 0xea, 0x10, 0x0d, 0x8c, + 0x5f, 0x0a, 0x5c, 0xa0, 0x0e, 0x51, 0xb0, 0x47, 0xc1, 0x11, 0xa5, 0x56, 0x57, 0x54, 0x90, 0x94, + 0xcb, 0x0a, 0x2a, 0x17, 0xc1, 0x94, 0x06, 0x3d, 0x85, 0xff, 0x52, 0x47, 0x15, 0xbb, 0x35, 0x14, + 0xdc, 0x74, 0xb9, 0x3b, 0xb4, 0xd5, 0xd3, 0xd5, 0xe4, 0x3d, 0xf3, 0x3c, 0x8b, 0x32, 0x33, 0x29, + 0x7c, 0x47, 0x03, 0x36, 0x87, 0x82, 0xb7, 0x23, 0xf3, 0xbd, 0xde, 0x34, 0x27, 0x51, 0x0b, 0xfd, + 0x12, 0x32, 0x61, 0xca, 0xb6, 0xe9, 0xe8, 0x4d, 0xdf, 0xad, 0x55, 0x00, 0x8f, 0x9f, 0x9a, 0xce, + 0x70, 0xd0, 0x09, 0x26, 0x77, 0x02, 0xa7, 0x3d, 0x7e, 0xba, 0xab, 0x14, 0xe5, 0x16, 0x2c, 0x46, + 0x95, 0x2e, 0x77, 0xdf, 0x00, 0x90, 0xd7, 0x49, 0x8c, 0xcc, 0x17, 0x74, 0xf4, 0x72, 0xa2, 0xa6, + 0x7b, 0x1c, 0x1d, 0xe1, 0xb4, 0xb6, 0xde, 0xa6, 0xa3, 0xf2, 0x1f, 0x06, 0x24, 0xa4, 0x8b, 0x37, + 0xd4, 0x67, 0xdf, 0x85, 0x39, 0xd7, 0x12, 0xc7, 0x41, 0x1b, 0xb8, 0x3b, 0x3b, 0x86, 0x4a, 0xdb, + 0x12, 0xc7, 0x0d, 0x9b, 0xaa, 0xa9, 0xa5, 0x30, 0xa5, 0x03, 0x39, 0x76, 0x22, 0xa5, 0x1c, 0x11, + 0xaa, 0x65, 0x07, 0x23, 0x42, 0x7e, 0xa3, 0x02, 0xc4, 0x83, 0x11, 0x94, 0x68, 0xc6, 0x70, 0x9c, + 0x11, 0xb4, 0x12, 0x0c, 0x12, 0x59, 0x26, 0xe9, 0x66, 0x4c, 0x8f, 0x92, 0x5a, 0x1a, 0x16, 0x18, + 0x51, 0x6f, 0xe4, 0xf2, 0x0f, 
0x06, 0xa4, 0x31, 0xed, 0x72, 0x8f, 0xc8, 0x63, 0x7e, 0x04, 0xd9, + 0x71, 0x4f, 0x91, 0x64, 0xe9, 0xe2, 0xbc, 0x7d, 0x59, 0x3f, 0xd9, 0xa6, 0xf2, 0x49, 0xbf, 0x48, + 0x26, 0x59, 0x6f, 0x42, 0x76, 0x7c, 0xcb, 0xa5, 0xaf, 0xc4, 0xdf, 0x72, 0x36, 0x51, 0x2e, 0xf2, + 0x71, 0xd7, 0x19, 0x8b, 0x51, 0x51, 0x1d, 0x43, 0x76, 0xaa, 0x2e, 0xce, 0x8d, 0x5e, 0xe3, 0xfc, + 0xe8, 0x5d, 0x05, 0x50, 0x11, 0xd1, 0xc9, 0xc9, 0x1c, 0x68, 0x5a, 0x04, 0x5d, 0x87, 0x94, 0x2e, + 0x4a, 0xa6, 0xaf, 0x51, 0x1a, 0x2f, 0x28, 0xb9, 0x45, 0xee, 0x0b, 0x80, 0x89, 0xe7, 0x59, 0x09, + 0xfe, 0xb1, 0xd3, 0xda, 0x6e, 0xec, 0xb4, 0x9a, 0x7b, 0x7b, 0x75, 0xf3, 0xc9, 0xee, 0x7e, 0xbb, + 0xb1, 0xd5, 0x7a, 0xd8, 0x6a, 0xd4, 0x0b, 0x31, 0xb4, 0x04, 0xd9, 0xc3, 0x06, 0x7e, 0x66, 0x3e, + 0xd9, 0x55, 0x26, 0xcf, 0x0a, 0x06, 0x5a, 0x84, 0x54, 0x24, 0xc5, 0xa5, 0xd4, 0xde, 0xdb, 0xdf, + 0x6f, 0xd5, 0x76, 0x1a, 0x85, 0x04, 0x02, 0x98, 0x0f, 0x56, 0xe6, 0x50, 0x1e, 0x32, 0x0a, 0x1a, + 0x28, 0x92, 0xb5, 0xaf, 0x0d, 0xb8, 0xde, 0xe5, 0x83, 0x8b, 0x09, 0xaa, 0x41, 0xdd, 0x76, 0x83, + 0x96, 0xd5, 0x36, 0x3e, 0x7d, 0x27, 0x30, 0xea, 0x73, 0xdb, 0x72, 0xfa, 0x15, 0xee, 0xf5, 0xab, + 0x7d, 0xea, 0xa8, 0xe6, 0x51, 0xd5, 0x4b, 0x96, 0xcb, 0xfc, 0x73, 0x7f, 0x6a, 0xdf, 0x23, 0xb6, + 0xfb, 0x63, 0x7c, 0xf9, 0x91, 0x86, 0xaa, 0x8e, 0x5d, 0xa9, 0xdb, 0x6e, 0xe5, 0x70, 0xfd, 0xf7, + 0x50, 0x7b, 0xa4, 0xb4, 0x47, 0x75, 0xdb, 0x3d, 0x3a, 0x5c, 0xef, 0xcc, 0x2b, 0x87, 0x0f, 0xfe, + 0x0a, 0x00, 0x00, 0xff, 0xff, 0x23, 0xed, 0x0b, 0x18, 0x24, 0x0f, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go index 27125f188..6feb97aac 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/spanner.pb.go @@ -374,10 +374,16 @@ func (m *ExecuteSqlRequest) GetPartitionToken() []byte { // Options for a PartitionQueryRequest and // 
PartitionReadRequest. type PartitionOptions struct { + // **Note:** This hint is currently ignored by PartitionQuery and + // PartitionRead requests. + // // The desired data size for each partition generated. The default for this // option is currently 1 GiB. This is only a hint. The actual size of each // partition may be smaller or larger than this size request. PartitionSizeBytes int64 `protobuf:"varint,1,opt,name=partition_size_bytes,json=partitionSizeBytes" json:"partition_size_bytes,omitempty"` + // **Note:** This hint is currently ignored by PartitionQuery and + // PartitionRead requests. + // // The desired maximum number of partitions to return. For example, this may // be set to the number of workers available. The default for this option // is currently 10,000. The maximum value is currently 200,000. This is only diff --git a/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go index 499cad60f..3fd4475b6 100644 --- a/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/spanner/v1/type.pb.go @@ -34,6 +34,12 @@ const ( TypeCode_FLOAT64 TypeCode = 3 // Encoded as `string` in RFC 3339 timestamp format. The time zone // must be present, and must be `"Z"`. + // + // If the schema has the column option + // `allow_commit_timestamp=true`, the placeholder string + // `"spanner.commit_timestamp()"` can be used to instruct the system + // to insert the commit timestamp associated with the transaction + // commit. TypeCode_TIMESTAMP TypeCode = 4 // Encoded as `string` in RFC 3339 date format. TypeCode_DATE TypeCode = 5 diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md index 2ed3314c7..b50c6e877 100644 --- a/vendor/gopkg.in/yaml.v2/README.md +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -48,8 +48,6 @@ The yaml package is licensed under the Apache License 2.0. 
Please see the LICENS Example ------- -Some more examples can be found in the "examples" folder. - ```Go package main diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index c8eac1642..e4e56e28e 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -113,6 +113,10 @@ func (p *parser) fail() { var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } @@ -430,6 +434,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { // reasons we set it as a string, so that code that unmarshals // timestamp-like values into interface{} will continue to // see a string and not a time.Time. + // TODO(v3) Drop this. out.Set(reflect.ValueOf(n.value)) } else { out.Set(reflect.ValueOf(resolved)) @@ -542,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { switch out.Kind() { case reflect.Slice: out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } case reflect.Interface: // No type hints. Will have to use a generic sequence. 
iface = out @@ -560,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j++ } } - out.Set(out.Slice(0, j)) + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } if iface.IsValid() { iface.Set(out) } diff --git a/vendor/gopkg.in/yaml.v2/decode_test.go b/vendor/gopkg.in/yaml.v2/decode_test.go index 7e1e8b336..9269f12b8 100644 --- a/vendor/gopkg.in/yaml.v2/decode_test.go +++ b/vendor/gopkg.in/yaml.v2/decode_test.go @@ -129,6 +129,9 @@ var unmarshalTests = []struct { }, { "bin: -0b101010", map[string]interface{}{"bin": -42}, + }, { + "bin: -0b1000000000000000000000000000000000000000000000000000000000000000", + map[string]interface{}{"bin": -9223372036854775808}, }, { "decimal: +685_230", map[string]int{"decimal": 685230}, @@ -241,6 +244,9 @@ var unmarshalTests = []struct { }, { "a: [1, 2]", &struct{ A []int }{[]int{1, 2}}, + }, { + "a: [1, 2]", + &struct{ A [2]int }{[2]int{1, 2}}, }, { "a: 1", &struct{ B int }{0}, @@ -399,6 +405,12 @@ var unmarshalTests = []struct { { "v: !!float '1.1'", map[string]interface{}{"v": 1.1}, + }, { + "v: !!float 0", + map[string]interface{}{"v": float64(0)}, + }, { + "v: !!float -1", + map[string]interface{}{"v": float64(-1)}, }, { "v: !!null ''", map[string]interface{}{"v": nil}, @@ -728,6 +740,18 @@ func (s *S) TestUnmarshal(c *C) { } } +// TODO(v3): This test should also work when unmarshaling onto an interface{}. +func (s *S) TestUnmarshalFullTimestamp(c *C) { + // Full timestamp in same format as encoded. This is confirmed to be + // properly decoded by Python as a timestamp as well. 
+ var str = "2015-02-24T18:19:39.123456789-03:00" + var t time.Time + err := yaml.Unmarshal([]byte(str), &t) + c.Assert(err, IsNil) + c.Assert(t, Equals, time.Date(2015, 2, 24, 18, 19, 39, 123456789, t.Location())) + c.Assert(t.In(time.UTC), Equals, time.Date(2015, 2, 24, 21, 19, 39, 123456789, time.UTC)) +} + func (s *S) TestDecoderSingleDocument(c *C) { // Test that Decoder.Decode works as expected on // all the unmarshal tests. @@ -813,6 +837,7 @@ var unmarshalErrorTests = []struct { {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, {"v: [A,", "yaml: line 1: did not find expected node content"}, {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a:\n- b: *,", "yaml: line 2: did not find expected alphabetic or numeric character"}, {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, {"value: -", "yaml: block sequence entries are not allowed in this context"}, @@ -1242,6 +1267,35 @@ func (t *textUnmarshaler) UnmarshalText(s []byte) error { return nil } +func (s *S) TestFuzzCrashers(c *C) { + cases := []string{ + // runtime error: index out of range + "\"\\0\\\r\n", + + // should not happen + " 0: [\n] 0", + "? ? \"\n\" 0", + " - {\n000}0", + "0:\n 0: [0\n] 0", + " - \"\n000\"0", + " - \"\n000\"\"", + "0:\n - {\n000}0", + "0:\n - \"\n000\"0", + "0:\n - \"\n000\"\"", + + // runtime error: index out of range + " \ufeff\n", + "? \ufeff\n", + "? \ufeff:\n", + "0: \ufeff\n", + "? 
\ufeff: \ufeff\n", + } + for _, data := range cases { + var v interface{} + _ = yaml.Unmarshal([]byte(data), &v) + } +} + //var data []byte //func init() { // var err error diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go index 1e730eff6..b3c62b39a 100644 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -131,7 +131,7 @@ func (e *encoder) marshal(tag string, in reflect.Value) { } else { e.structv(tag, in) } - case reflect.Slice: + case reflect.Slice, reflect.Array: if in.Type().Elem() == mapItemType { e.itemsv(tag, in) } else { @@ -328,10 +328,8 @@ func (e *encoder) uintv(tag string, in reflect.Value) { func (e *encoder) timev(tag string, in reflect.Value) { t := in.Interface().(time.Time) - if tag == "" { - tag = yaml_TIMESTAMP_TAG - } - e.emitScalar(t.Format(time.RFC3339Nano), "", tag, yaml_PLAIN_SCALAR_STYLE) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) floatv(tag string, in reflect.Value) { diff --git a/vendor/gopkg.in/yaml.v2/encode_test.go b/vendor/gopkg.in/yaml.v2/encode_test.go index 2e7841146..0324a909c 100644 --- a/vendor/gopkg.in/yaml.v2/encode_test.go +++ b/vendor/gopkg.in/yaml.v2/encode_test.go @@ -147,6 +147,9 @@ var marshalTests = []struct { }, { &struct{ A []int }{[]int{1, 2}}, "a:\n- 1\n- 2\n", + }, { + &struct{ A [2]int }{[2]int{1, 2}}, + "a:\n- 1\n- 2\n", }, { &struct { B int "a" @@ -212,7 +215,7 @@ var marshalTests = []struct { T2: time.Date(2018, 1, 9, 10, 40, 47, 0, time.UTC), T4: newTime(time.Date(2098, 1, 9, 10, 40, 47, 0, time.UTC)), }, - "t2: !!timestamp 2018-01-09T10:40:47Z\nt4: !!timestamp 2098-01-09T10:40:47Z\n", + "t2: 2018-01-09T10:40:47Z\nt4: 2098-01-09T10:40:47Z\n", }, // Nil interface that implements Marshaler. { @@ -329,11 +332,16 @@ var marshalTests = []struct { // time.Time gets a timestamp tag. 
{ map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC)}, - "a: !!timestamp 2015-02-24T18:19:39Z\n", + "a: 2015-02-24T18:19:39Z\n", }, { map[string]*time.Time{"a": newTime(time.Date(2015, 2, 24, 18, 19, 39, 0, time.UTC))}, - "a: !!timestamp 2015-02-24T18:19:39Z\n", + "a: 2015-02-24T18:19:39Z\n", + }, + { + // This is confirmed to be properly decoded in Python (libyaml) without a timestamp tag. + map[string]time.Time{"a": time.Date(2015, 2, 24, 18, 19, 39, 123456789, time.FixedZone("FOO", -3*60*60))}, + "a: 2015-02-24T18:19:39.123456789-03:00\n", }, // Ensure timestamp-like strings are quoted. { @@ -527,8 +535,13 @@ func (s *S) TestSortedOutput(c *C) { "1", "2", "a!10", - "a/2", + "a/0001", + "a/002", + "a/3", "a/10", + "a/11", + "a/0012", + "a/100", "a~10", "ab/1", "b/1", @@ -543,6 +556,8 @@ func (s *S) TestSortedOutput(c *C) { "c2.10", "c10.2", "d1", + "d7", + "d7abc", "d12", "d12a", } diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go index f45079171..7c1f5fac3 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { panic("read handler must be set") } + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. 
+ //return true } // Return if the buffer contains enough characters. @@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { break } } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } parser.buffer = parser.buffer[:buffer_len] return true } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index ea90bd5e0..6c151db6f 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } } failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) }() @@ -167,12 +180,12 @@ func resolve(tag string, in string) (rtag string, out interface{}) { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) } else { - return yaml_INT_TAG, -intv + return yaml_INT_TAG, intv } } } @@ -211,10 +224,10 @@ func encodeBase64(s string) string { // This is a subset of the formats allowed by the regular expression // defined at http://yaml.org/type/timestamp.html. 
var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5Z07:00", - "2006-1-2t15:4:5Z07:00", // RFC3339 with lower-case "t". - "2006-1-2 15:4:5", // space separated with no time zone - "2006-1-2", // date only + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" // from the set of examples. } diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 492a9845d..e8e2bcad0 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { required := parser.flow_level == 0 && parser.indent == parser.mark.column - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - // // If the current position may start a simple key, save it. // @@ -2475,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + // Check if we are at the end of the scalar. if single { if parser.buffer[parser.buffer_pos] == '\'' { @@ -2487,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Consume a space or a tab character. 
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go index 5958822f9..4c45e660a 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool { } var ai, bi int var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') }